| Column | Type | Values / length range |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-18.9k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 465-23.6k |
| num_tokens_prompt | int64 | 556-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
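The snippet below is a minimal usage sketch, not part of the dataset itself. It assumes the Hub dataset id matches the `source` field shown in the rows (`rasdani/github-patches`), that the data lives in a `train` split, and that `verification_info` is a JSON-encoded string whose keys are the ones visible in the samples below (`golden_diff`, `issue`, `before_files`).

```python
# Minimal sketch: load the dataset and inspect one row.
# Assumptions: the Hub id equals the `source` field ("rasdani/github-patches")
# and the default split is named "train".
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
# Identify the sample and its token bookkeeping columns.
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# `verification_info` is stored as a JSON string; the samples below show the
# keys golden_diff, issue and before_files.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
print(info["golden_diff"][:200])
```

If the split name differs, calling `load_dataset("rasdani/github-patches")` without `split` returns a `DatasetDict` whose keys list the splits that actually exist.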
gh_patches_debug_27441
rasdani/github-patches
git_diff
ivy-llc__ivy-13559
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> index_add </issue> <code> [start of ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 4 5 6 @to_ivy_arrays_and_back 7 def adjoint(input): 8 return ivy.adjoint(input) 9 10 11 @to_ivy_arrays_and_back 12 def cat(tensors, dim=0, *, out=None): 13 return ivy.concat(tensors, axis=dim, out=out) 14 15 16 @to_ivy_arrays_and_back 17 def chunk(input, chunks, dim=0): 18 if ivy.shape(input) == (): 19 return [input] 20 else: 21 dim_size = ivy.shape(input)[dim] 22 chunk_size = dim_size // chunks 23 if chunk_size == 0: 24 return ivy.split(input, num_or_size_splits=dim_size, axis=dim) 25 else: 26 remainder = dim_size % chunks 27 if remainder == 0: 28 return ivy.split(input, num_or_size_splits=chunks, axis=dim) 29 else: 30 return ivy.split( 31 input, 32 num_or_size_splits=tuple( 33 [chunk_size + remainder] + [chunk_size] * (chunks - 1) 34 ), 35 axis=dim, 36 ) 37 38 39 @to_ivy_arrays_and_back 40 def concat(tensors, dim=0, *, out=None): 41 return ivy.concat(tensors, axis=dim, out=out) 42 43 44 @to_ivy_arrays_and_back 45 def gather(input, dim, index, *, sparse_grad=False, out=None): 46 if sparse_grad: 47 raise ivy.utils.exceptions.IvyException( 48 "Gather does not yet support the sparse grad functionality" 49 ) 50 51 dim = dim % len(input.shape) 52 all_indices = ivy.argwhere(ivy.full(index.shape, True)) 53 gather_locations = ivy.reshape(index, [ivy.prod(ivy.array(index.shape))]) 54 55 gather_indices = [] 56 for axis in range(len(index.shape)): 57 if axis == dim: 58 gather_indices.append(ivy.array(gather_locations, dtype=index.dtype)) 59 else: 60 gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype)) 61 62 gather_indices = ivy.stack(gather_indices, axis=-1) 63 gathered = ivy.gather_nd(input, gather_indices) 64 reshaped = ivy.reshape(gathered, index.shape) 65 return reshaped 66 67 68 @to_ivy_arrays_and_back 69 def nonzero(input, *, out=None, as_tuple=False): 70 ret = ivy.nonzero(input) 71 if as_tuple is False: 72 ret = ivy.matrix_transpose(ivy.stack(ret)) 73 74 if ivy.exists(out): 75 return ivy.inplace_update(out, ret) 76 return ret 77 78 79 @to_ivy_arrays_and_back 80 def permute(input, dims): 81 return ivy.permute_dims(input, axes=dims) 82 83 84 @to_ivy_arrays_and_back 85 def reshape(input, shape): 86 return ivy.reshape(input, shape) 87 88 89 @to_ivy_arrays_and_back 90 def squeeze(input, dim): 91 if isinstance(dim, int) and input.ndim > 0: 92 if input.shape[dim] > 1: 93 return input 94 return ivy.squeeze(input, dim) 95 96 97 @to_ivy_arrays_and_back 98 def stack(tensors, dim=0, *, out=None): 99 return ivy.stack(tensors, axis=dim, out=out) 100 101 102 @to_ivy_arrays_and_back 103 def swapaxes(input, axis0, axis1): 104 return ivy.swapaxes(input, axis0, axis1) 105 106 107 @to_ivy_arrays_and_back 108 def swapdims(input, dim0, dim1): 109 return ivy.swapaxes(input, dim0, dim1) 110 111 112 @to_ivy_arrays_and_back 113 def transpose(input, dim0, dim1): 114 return ivy.swapaxes(input, dim0, dim1) 115 116 117 @to_ivy_arrays_and_back 118 def t(input): 119 if input.ndim > 2: 120 raise ivy.utils.exceptions.IvyException( 121 "t(input) expects a tensor with <= 2 dimensions, but self is %dD" 122 % input.ndim 123 ) 124 if input.ndim == 2: 125 return ivy.swapaxes(input, 0, 1) 126 else: 127 return input 128 129 130 @to_ivy_arrays_and_back 131 def tile(input, dims): 132 try: 133 tup = 
tuple(dims) 134 except TypeError: 135 tup = (dims,) 136 d = len(tup) 137 res = 0 138 if len(input.shape) > len([dims]) - 1: 139 res = input 140 if d < input.ndim: 141 tup = (1,) * (input.ndim - d) + tup 142 res = ivy.tile(input, tup) 143 144 else: 145 res = ivy.tile(input, repeats=dims, out=None) 146 return res 147 148 149 @to_ivy_arrays_and_back 150 def unsqueeze(input, dim=0): 151 return ivy.expand_dims(input, axis=dim) 152 153 154 @to_ivy_arrays_and_back 155 def argwhere(input): 156 return ivy.argwhere(input) 157 158 159 @to_ivy_arrays_and_back 160 def movedim(input, source, destination): 161 return ivy.moveaxis(input, source, destination) 162 163 164 @to_ivy_arrays_and_back 165 def moveaxis(input, source, destination): 166 return ivy.moveaxis(input, source, destination) 167 168 169 @to_ivy_arrays_and_back 170 def hstack(tensors, *, out=None): 171 return ivy.hstack(tensors, out=out) 172 173 174 @to_ivy_arrays_and_back 175 def index_select(input, dim, index, *, out=None): 176 return ivy.gather(input, index, axis=dim, out=out) 177 178 179 @to_ivy_arrays_and_back 180 def dstack(tensors, *, out=None): 181 return ivy.dstack(tensors, out=out) 182 183 184 @to_ivy_arrays_and_back 185 def take_along_dim(input, indices, dim, *, out=None): 186 return ivy.take_along_axis(input, indices, dim, out=out) 187 188 189 @to_ivy_arrays_and_back 190 def vstack(tensors, *, out=None): 191 return ivy.vstack(tensors, out=out) 192 193 194 @to_ivy_arrays_and_back 195 def split(tensor, split_size_or_sections, dim=0): 196 if isinstance(split_size_or_sections, int): 197 split_size = split_size_or_sections 198 split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size) 199 if tensor.shape[dim] % split_size: 200 split_size_or_sections.append(tensor.shape[dim] % split_size) 201 return tuple( 202 ivy.split( 203 tensor, 204 num_or_size_splits=split_size_or_sections, 205 axis=dim, 206 with_remainder=True, 207 ) 208 ) 209 210 211 @to_ivy_arrays_and_back 212 def tensor_split(input, indices_or_sections, dim=0): 213 if isinstance(indices_or_sections, (list, tuple)): 214 indices_or_sections = ( 215 ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]]) 216 .astype(ivy.int8) 217 .to_list() 218 ) 219 return ivy.split( 220 input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False 221 ) 222 223 224 @to_ivy_arrays_and_back 225 def unbind(input, dim=0): 226 shape = list(input.shape) 227 shape.pop(dim) 228 return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)]) 229 230 231 def _get_indices_or_sections(indices_or_sections, indices, sections): 232 if not ivy.exists(indices_or_sections): 233 if ivy.exists(indices) and not ivy.exists(sections): 234 indices_or_sections = indices 235 elif ivy.exists(sections) and not ivy.exists(indices): 236 indices_or_sections = sections 237 else: 238 raise ivy.utils.exception.IvyError( 239 "got invalid argument for indices_or_sections" 240 ) 241 return indices_or_sections 242 243 244 @to_ivy_arrays_and_back 245 def dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 246 indices_or_sections = _get_indices_or_sections( 247 indices_or_sections, indices, sections 248 ) 249 return tuple(ivy.dsplit(input, indices_or_sections)) 250 251 252 @to_ivy_arrays_and_back 253 def hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 254 indices_or_sections = _get_indices_or_sections( 255 indices_or_sections, indices, sections 256 ) 257 return tuple(ivy.hsplit(input, indices_or_sections)) 258 259 260 
@to_ivy_arrays_and_back 261 def vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None): 262 indices_or_sections = _get_indices_or_sections( 263 indices_or_sections, indices, sections 264 ) 265 return tuple(ivy.vsplit(input, indices_or_sections)) 266 267 268 @to_ivy_arrays_and_back 269 def row_stack(tensors, *, out=None): 270 return ivy.vstack(tensors, out=out) 271 272 273 @to_ivy_arrays_and_back 274 def where(condition, input=None, other=None): 275 if not ivy.exists(input) and not ivy.exists(other): 276 return nonzero(condition, as_tuple=True) 277 return ivy.where(condition, input, other) 278 279 280 @to_ivy_arrays_and_back 281 def conj(input): 282 return ivy.conj(input) 283 [end of ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py --- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py +++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py @@ -280,3 +280,45 @@ @to_ivy_arrays_and_back def conj(input): return ivy.conj(input) + + +@to_ivy_arrays_and_back +def index_add(input, dim, index, source, *, alpha=1, out=None): + # Potential Bug: + # There is an issue with the torch backend (not caused by ivy) + # where half precision (float16) values get ignored in summation: + # + # >>> a = torch.tensor(-14., dtype=torch.float16) + # >>> b = torch.tensor(1.014, dtype=torch.float16) + # >>> a+b + # tensor(-12.9844, dtype=torch.float16) + # >>> a = torch.tensor(-24., dtype=torch.float16) + # >>> a+b + # tensor(-22.9844, dtype=torch.float16) + # >>> a = torch.tensor(-34., dtype=torch.float16) + # >>> a+b + # tensor(-33., dtype=torch.float16) + # >>> + input = ivy.swapaxes(input, dim, 0) + source = ivy.swapaxes(source, dim, 0) + _to_adds = [] + index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0])) + while index: + _curr_idx = index[0][0] + while len(_to_adds) < _curr_idx: + _to_adds.append(ivy.zeros_like(source[0])) + _to_add_cum = ivy.get_item(source, index[0][1]) + while (1 < len(index)) and (index[0][0] == index[1][0]): + _to_add_cum = ivy.add(_to_add_cum, ivy.get_item(source, index.pop(1)[1])) + index.pop(0) + _to_adds.append(_to_add_cum) + while len(_to_adds) < input.shape[0]: + _to_adds.append(ivy.zeros_like(source[0])) + _to_adds = ivy.stack(_to_adds) + if len(input.shape) < 2: + # Added this line due to the paddle backend treating scalars as 1-d arrays + _to_adds = ivy.flatten(_to_adds) + + ret = ivy.add(input, _to_adds, alpha=alpha) + ret = ivy.swapaxes(ret, 0, dim, out=out) + return ret
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n--- a/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n+++ b/ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py\n@@ -280,3 +280,45 @@\n @to_ivy_arrays_and_back\r\n def conj(input):\r\n return ivy.conj(input)\r\n+\r\n+\r\n+@to_ivy_arrays_and_back\r\n+def index_add(input, dim, index, source, *, alpha=1, out=None):\r\n+ # Potential Bug:\r\n+ # There is an issue with the torch backend (not caused by ivy)\r\n+ # where half precision (float16) values get ignored in summation:\r\n+ #\r\n+ # >>> a = torch.tensor(-14., dtype=torch.float16)\r\n+ # >>> b = torch.tensor(1.014, dtype=torch.float16)\r\n+ # >>> a+b\r\n+ # tensor(-12.9844, dtype=torch.float16)\r\n+ # >>> a = torch.tensor(-24., dtype=torch.float16)\r\n+ # >>> a+b\r\n+ # tensor(-22.9844, dtype=torch.float16)\r\n+ # >>> a = torch.tensor(-34., dtype=torch.float16)\r\n+ # >>> a+b\r\n+ # tensor(-33., dtype=torch.float16)\r\n+ # >>>\r\n+ input = ivy.swapaxes(input, dim, 0)\r\n+ source = ivy.swapaxes(source, dim, 0)\r\n+ _to_adds = []\r\n+ index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda x: x[0]))\r\n+ while index:\r\n+ _curr_idx = index[0][0]\r\n+ while len(_to_adds) < _curr_idx:\r\n+ _to_adds.append(ivy.zeros_like(source[0]))\r\n+ _to_add_cum = ivy.get_item(source, index[0][1])\r\n+ while (1 < len(index)) and (index[0][0] == index[1][0]):\r\n+ _to_add_cum = ivy.add(_to_add_cum, ivy.get_item(source, index.pop(1)[1]))\r\n+ index.pop(0)\r\n+ _to_adds.append(_to_add_cum)\r\n+ while len(_to_adds) < input.shape[0]:\r\n+ _to_adds.append(ivy.zeros_like(source[0]))\r\n+ _to_adds = ivy.stack(_to_adds)\r\n+ if len(input.shape) < 2:\r\n+ # Added this line due to the paddle backend treating scalars as 1-d arrays\r\n+ _to_adds = ivy.flatten(_to_adds)\r\n+\r\n+ ret = ivy.add(input, _to_adds, alpha=alpha)\r\n+ ret = ivy.swapaxes(ret, 0, dim, out=out)\r\n+ return ret\n", "issue": "index_add\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef adjoint(input):\r\n return ivy.adjoint(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef cat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef chunk(input, chunks, dim=0):\r\n if ivy.shape(input) == ():\r\n return [input]\r\n else:\r\n dim_size = ivy.shape(input)[dim]\r\n chunk_size = dim_size // chunks\r\n if chunk_size == 0:\r\n return ivy.split(input, num_or_size_splits=dim_size, axis=dim)\r\n else:\r\n remainder = dim_size % chunks\r\n if remainder == 0:\r\n return ivy.split(input, num_or_size_splits=chunks, axis=dim)\r\n else:\r\n return ivy.split(\r\n input,\r\n num_or_size_splits=tuple(\r\n [chunk_size + remainder] + [chunk_size] * (chunks - 1)\r\n ),\r\n axis=dim,\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef concat(tensors, dim=0, *, out=None):\r\n return ivy.concat(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef gather(input, dim, index, *, sparse_grad=False, out=None):\r\n if sparse_grad:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"Gather does not yet support the sparse grad functionality\"\r\n )\r\n\r\n dim = dim % len(input.shape)\r\n all_indices = ivy.argwhere(ivy.full(index.shape, True))\r\n gather_locations = ivy.reshape(index, 
[ivy.prod(ivy.array(index.shape))])\r\n\r\n gather_indices = []\r\n for axis in range(len(index.shape)):\r\n if axis == dim:\r\n gather_indices.append(ivy.array(gather_locations, dtype=index.dtype))\r\n else:\r\n gather_indices.append(ivy.array(all_indices[:, axis], dtype=index.dtype))\r\n\r\n gather_indices = ivy.stack(gather_indices, axis=-1)\r\n gathered = ivy.gather_nd(input, gather_indices)\r\n reshaped = ivy.reshape(gathered, index.shape)\r\n return reshaped\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nonzero(input, *, out=None, as_tuple=False):\r\n ret = ivy.nonzero(input)\r\n if as_tuple is False:\r\n ret = ivy.matrix_transpose(ivy.stack(ret))\r\n\r\n if ivy.exists(out):\r\n return ivy.inplace_update(out, ret)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef permute(input, dims):\r\n return ivy.permute_dims(input, axes=dims)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef reshape(input, shape):\r\n return ivy.reshape(input, shape)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef squeeze(input, dim):\r\n if isinstance(dim, int) and input.ndim > 0:\r\n if input.shape[dim] > 1:\r\n return input\r\n return ivy.squeeze(input, dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef stack(tensors, dim=0, *, out=None):\r\n return ivy.stack(tensors, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapaxes(input, axis0, axis1):\r\n return ivy.swapaxes(input, axis0, axis1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef swapdims(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef transpose(input, dim0, dim1):\r\n return ivy.swapaxes(input, dim0, dim1)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef t(input):\r\n if input.ndim > 2:\r\n raise ivy.utils.exceptions.IvyException(\r\n \"t(input) expects a tensor with <= 2 dimensions, but self is %dD\"\r\n % input.ndim\r\n )\r\n if input.ndim == 2:\r\n return ivy.swapaxes(input, 0, 1)\r\n else:\r\n return input\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tile(input, dims):\r\n try:\r\n tup = tuple(dims)\r\n except TypeError:\r\n tup = (dims,)\r\n d = len(tup)\r\n res = 0\r\n if len(input.shape) > len([dims]) - 1:\r\n res = input\r\n if d < input.ndim:\r\n tup = (1,) * (input.ndim - d) + tup\r\n res = ivy.tile(input, tup)\r\n\r\n else:\r\n res = ivy.tile(input, repeats=dims, out=None)\r\n return res\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unsqueeze(input, dim=0):\r\n return ivy.expand_dims(input, axis=dim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argwhere(input):\r\n return ivy.argwhere(input)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef movedim(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hstack(tensors, *, out=None):\r\n return ivy.hstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef index_select(input, dim, index, *, out=None):\r\n return ivy.gather(input, index, axis=dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dstack(tensors, *, out=None):\r\n return ivy.dstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef take_along_dim(input, indices, dim, *, out=None):\r\n return ivy.take_along_axis(input, indices, dim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vstack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef split(tensor, split_size_or_sections, dim=0):\r\n if isinstance(split_size_or_sections, int):\r\n 
split_size = split_size_or_sections\r\n split_size_or_sections = [split_size] * (tensor.shape[dim] // split_size)\r\n if tensor.shape[dim] % split_size:\r\n split_size_or_sections.append(tensor.shape[dim] % split_size)\r\n return tuple(\r\n ivy.split(\r\n tensor,\r\n num_or_size_splits=split_size_or_sections,\r\n axis=dim,\r\n with_remainder=True,\r\n )\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef tensor_split(input, indices_or_sections, dim=0):\r\n if isinstance(indices_or_sections, (list, tuple)):\r\n indices_or_sections = (\r\n ivy.diff(indices_or_sections, prepend=[0], append=[input.shape[dim]])\r\n .astype(ivy.int8)\r\n .to_list()\r\n )\r\n return ivy.split(\r\n input, num_or_size_splits=indices_or_sections, axis=dim, with_remainder=False\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef unbind(input, dim=0):\r\n shape = list(input.shape)\r\n shape.pop(dim)\r\n return tuple([x.reshape(tuple(shape)) for x in split(input, 1, dim=dim)])\r\n\r\n\r\ndef _get_indices_or_sections(indices_or_sections, indices, sections):\r\n if not ivy.exists(indices_or_sections):\r\n if ivy.exists(indices) and not ivy.exists(sections):\r\n indices_or_sections = indices\r\n elif ivy.exists(sections) and not ivy.exists(indices):\r\n indices_or_sections = sections\r\n else:\r\n raise ivy.utils.exception.IvyError(\r\n \"got invalid argument for indices_or_sections\"\r\n )\r\n return indices_or_sections\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.dsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef hsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.hsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef vsplit(input, indices_or_sections=None, /, *, indices=None, sections=None):\r\n indices_or_sections = _get_indices_or_sections(\r\n indices_or_sections, indices, sections\r\n )\r\n return tuple(ivy.vsplit(input, indices_or_sections))\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef row_stack(tensors, *, out=None):\r\n return ivy.vstack(tensors, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef where(condition, input=None, other=None):\r\n if not ivy.exists(input) and not ivy.exists(other):\r\n return nonzero(condition, as_tuple=True)\r\n return ivy.where(condition, input, other)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef conj(input):\r\n return ivy.conj(input)\r\n", "path": "ivy/functional/frontends/torch/indexing_slicing_joining_mutating_ops.py"}]}
3,316
696
gh_patches_debug_883
rasdani/github-patches
git_diff
scrapy__scrapy-5880
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> _sent_failed cut the errback chain in MailSender `MailSender._sent_failed` return `None`, instead of `failure`. This cut the errback call chain, making impossible to detect in the code fail in the mails in client code. </issue> <code> [start of scrapy/mail.py] 1 """ 2 Mail sending helpers 3 4 See documentation in docs/topics/email.rst 5 """ 6 import logging 7 from email import encoders as Encoders 8 from email.mime.base import MIMEBase 9 from email.mime.multipart import MIMEMultipart 10 from email.mime.nonmultipart import MIMENonMultipart 11 from email.mime.text import MIMEText 12 from email.utils import formatdate 13 from io import BytesIO 14 15 from twisted import version as twisted_version 16 from twisted.internet import defer, ssl 17 from twisted.python.versions import Version 18 19 from scrapy.utils.misc import arg_to_iter 20 from scrapy.utils.python import to_bytes 21 22 logger = logging.getLogger(__name__) 23 24 25 # Defined in the email.utils module, but undocumented: 26 # https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42 27 COMMASPACE = ", " 28 29 30 def _to_bytes_or_none(text): 31 if text is None: 32 return None 33 return to_bytes(text) 34 35 36 class MailSender: 37 def __init__( 38 self, 39 smtphost="localhost", 40 mailfrom="scrapy@localhost", 41 smtpuser=None, 42 smtppass=None, 43 smtpport=25, 44 smtptls=False, 45 smtpssl=False, 46 debug=False, 47 ): 48 self.smtphost = smtphost 49 self.smtpport = smtpport 50 self.smtpuser = _to_bytes_or_none(smtpuser) 51 self.smtppass = _to_bytes_or_none(smtppass) 52 self.smtptls = smtptls 53 self.smtpssl = smtpssl 54 self.mailfrom = mailfrom 55 self.debug = debug 56 57 @classmethod 58 def from_settings(cls, settings): 59 return cls( 60 smtphost=settings["MAIL_HOST"], 61 mailfrom=settings["MAIL_FROM"], 62 smtpuser=settings["MAIL_USER"], 63 smtppass=settings["MAIL_PASS"], 64 smtpport=settings.getint("MAIL_PORT"), 65 smtptls=settings.getbool("MAIL_TLS"), 66 smtpssl=settings.getbool("MAIL_SSL"), 67 ) 68 69 def send( 70 self, 71 to, 72 subject, 73 body, 74 cc=None, 75 attachs=(), 76 mimetype="text/plain", 77 charset=None, 78 _callback=None, 79 ): 80 from twisted.internet import reactor 81 82 if attachs: 83 msg = MIMEMultipart() 84 else: 85 msg = MIMENonMultipart(*mimetype.split("/", 1)) 86 87 to = list(arg_to_iter(to)) 88 cc = list(arg_to_iter(cc)) 89 90 msg["From"] = self.mailfrom 91 msg["To"] = COMMASPACE.join(to) 92 msg["Date"] = formatdate(localtime=True) 93 msg["Subject"] = subject 94 rcpts = to[:] 95 if cc: 96 rcpts.extend(cc) 97 msg["Cc"] = COMMASPACE.join(cc) 98 99 if charset: 100 msg.set_charset(charset) 101 102 if attachs: 103 msg.attach(MIMEText(body, "plain", charset or "us-ascii")) 104 for attach_name, mimetype, f in attachs: 105 part = MIMEBase(*mimetype.split("/")) 106 part.set_payload(f.read()) 107 Encoders.encode_base64(part) 108 part.add_header( 109 "Content-Disposition", "attachment", filename=attach_name 110 ) 111 msg.attach(part) 112 else: 113 msg.set_payload(body) 114 115 if _callback: 116 _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg) 117 118 if self.debug: 119 logger.debug( 120 "Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s " 121 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d', 122 { 123 "mailto": to, 124 "mailcc": cc, 125 "mailsubject": subject, 126 "mailattachs": len(attachs), 127 }, 128 ) 129 return 130 131 dfd = self._sendmail(rcpts, 
msg.as_string().encode(charset or "utf-8")) 132 dfd.addCallbacks( 133 callback=self._sent_ok, 134 errback=self._sent_failed, 135 callbackArgs=[to, cc, subject, len(attachs)], 136 errbackArgs=[to, cc, subject, len(attachs)], 137 ) 138 reactor.addSystemEventTrigger("before", "shutdown", lambda: dfd) 139 return dfd 140 141 def _sent_ok(self, result, to, cc, subject, nattachs): 142 logger.info( 143 "Mail sent OK: To=%(mailto)s Cc=%(mailcc)s " 144 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d', 145 { 146 "mailto": to, 147 "mailcc": cc, 148 "mailsubject": subject, 149 "mailattachs": nattachs, 150 }, 151 ) 152 153 def _sent_failed(self, failure, to, cc, subject, nattachs): 154 errstr = str(failure.value) 155 logger.error( 156 "Unable to send mail: To=%(mailto)s Cc=%(mailcc)s " 157 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d' 158 "- %(mailerr)s", 159 { 160 "mailto": to, 161 "mailcc": cc, 162 "mailsubject": subject, 163 "mailattachs": nattachs, 164 "mailerr": errstr, 165 }, 166 ) 167 168 def _sendmail(self, to_addrs, msg): 169 from twisted.internet import reactor 170 171 msg = BytesIO(msg) 172 d = defer.Deferred() 173 174 factory = self._create_sender_factory(to_addrs, msg, d) 175 176 if self.smtpssl: 177 reactor.connectSSL( 178 self.smtphost, self.smtpport, factory, ssl.ClientContextFactory() 179 ) 180 else: 181 reactor.connectTCP(self.smtphost, self.smtpport, factory) 182 183 return d 184 185 def _create_sender_factory(self, to_addrs, msg, d): 186 from twisted.mail.smtp import ESMTPSenderFactory 187 188 factory_keywords = { 189 "heloFallback": True, 190 "requireAuthentication": False, 191 "requireTransportSecurity": self.smtptls, 192 } 193 194 # Newer versions of twisted require the hostname to use STARTTLS 195 if twisted_version >= Version("twisted", 21, 2, 0): 196 factory_keywords["hostname"] = self.smtphost 197 198 factory = ESMTPSenderFactory( 199 self.smtpuser, 200 self.smtppass, 201 self.mailfrom, 202 to_addrs, 203 msg, 204 d, 205 **factory_keywords 206 ) 207 factory.noisy = False 208 return factory 209 [end of scrapy/mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/mail.py b/scrapy/mail.py --- a/scrapy/mail.py +++ b/scrapy/mail.py @@ -164,6 +164,7 @@ "mailerr": errstr, }, ) + return failure def _sendmail(self, to_addrs, msg): from twisted.internet import reactor
{"golden_diff": "diff --git a/scrapy/mail.py b/scrapy/mail.py\n--- a/scrapy/mail.py\n+++ b/scrapy/mail.py\n@@ -164,6 +164,7 @@\n \"mailerr\": errstr,\n },\n )\n+ return failure\n \n def _sendmail(self, to_addrs, msg):\n from twisted.internet import reactor\n", "issue": "_sent_failed cut the errback chain in MailSender\n`MailSender._sent_failed` return `None`, instead of `failure`. This cut the errback call chain, making impossible to detect in the code fail in the mails in client code.\n\n", "before_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom io import BytesIO\n\nfrom twisted import version as twisted_version\nfrom twisted.internet import defer, ssl\nfrom twisted.python.versions import Version\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import to_bytes\n\nlogger = logging.getLogger(__name__)\n\n\n# Defined in the email.utils module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42\nCOMMASPACE = \", \"\n\n\ndef _to_bytes_or_none(text):\n if text is None:\n return None\n return to_bytes(text)\n\n\nclass MailSender:\n def __init__(\n self,\n smtphost=\"localhost\",\n mailfrom=\"scrapy@localhost\",\n smtpuser=None,\n smtppass=None,\n smtpport=25,\n smtptls=False,\n smtpssl=False,\n debug=False,\n ):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = _to_bytes_or_none(smtpuser)\n self.smtppass = _to_bytes_or_none(smtppass)\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(\n smtphost=settings[\"MAIL_HOST\"],\n mailfrom=settings[\"MAIL_FROM\"],\n smtpuser=settings[\"MAIL_USER\"],\n smtppass=settings[\"MAIL_PASS\"],\n smtpport=settings.getint(\"MAIL_PORT\"),\n smtptls=settings.getbool(\"MAIL_TLS\"),\n smtpssl=settings.getbool(\"MAIL_SSL\"),\n )\n\n def send(\n self,\n to,\n subject,\n body,\n cc=None,\n attachs=(),\n mimetype=\"text/plain\",\n charset=None,\n _callback=None,\n ):\n from twisted.internet import reactor\n\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mimetype.split(\"/\", 1))\n\n to = list(arg_to_iter(to))\n cc = list(arg_to_iter(cc))\n\n msg[\"From\"] = self.mailfrom\n msg[\"To\"] = COMMASPACE.join(to)\n msg[\"Date\"] = formatdate(localtime=True)\n msg[\"Subject\"] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg[\"Cc\"] = COMMASPACE.join(cc)\n\n if charset:\n msg.set_charset(charset)\n\n if attachs:\n msg.attach(MIMEText(body, \"plain\", charset or \"us-ascii\"))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split(\"/\"))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\", \"attachment\", filename=attach_name\n )\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n logger.debug(\n \"Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": len(attachs),\n },\n )\n return\n\n dfd = 
self._sendmail(rcpts, msg.as_string().encode(charset or \"utf-8\"))\n dfd.addCallbacks(\n callback=self._sent_ok,\n errback=self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)],\n )\n reactor.addSystemEventTrigger(\"before\", \"shutdown\", lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n logger.info(\n \"Mail sent OK: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": nattachs,\n },\n )\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n logger.error(\n \"Unable to send mail: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n \"- %(mailerr)s\",\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": nattachs,\n \"mailerr\": errstr,\n },\n )\n\n def _sendmail(self, to_addrs, msg):\n from twisted.internet import reactor\n\n msg = BytesIO(msg)\n d = defer.Deferred()\n\n factory = self._create_sender_factory(to_addrs, msg, d)\n\n if self.smtpssl:\n reactor.connectSSL(\n self.smtphost, self.smtpport, factory, ssl.ClientContextFactory()\n )\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n\n def _create_sender_factory(self, to_addrs, msg, d):\n from twisted.mail.smtp import ESMTPSenderFactory\n\n factory_keywords = {\n \"heloFallback\": True,\n \"requireAuthentication\": False,\n \"requireTransportSecurity\": self.smtptls,\n }\n\n # Newer versions of twisted require the hostname to use STARTTLS\n if twisted_version >= Version(\"twisted\", 21, 2, 0):\n factory_keywords[\"hostname\"] = self.smtphost\n\n factory = ESMTPSenderFactory(\n self.smtpuser,\n self.smtppass,\n self.mailfrom,\n to_addrs,\n msg,\n d,\n **factory_keywords\n )\n factory.noisy = False\n return factory\n", "path": "scrapy/mail.py"}]}
2,542
79
gh_patches_debug_21680
rasdani/github-patches
git_diff
conan-io__conan-2943
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistency between local and remote version of `conan search` Depending on searching either in remotes or locally we're getting different results for situations where we don't use wildcards. Example: ``` $ conan search zlib There are no packages matching the 'zlib' pattern $ conan search zlib* Existing package recipes: zlib/1.2.8@conan/stable zlib/1.2.11@conan/stable ``` ``` $ conan search zlib -r conan-center Existing package recipes: zlib/1.2.8@conan/stable zlib/1.2.11@conan/stable zlib/1.2.11@conan/testing ``` Same for combinations such as `zlib/1.2.8`, `zlib/1.2.8@`, `zlib/1.2.8@conan`, `zlib/1.2.8@conan/` except for `zlib/`. Proposition: make local search act in the same manner as remote search. </issue> <code> [start of conans/search/search.py] 1 import re 2 import os 3 4 5 from fnmatch import translate 6 7 from conans.errors import ConanException, NotFoundException 8 from conans.model.info import ConanInfo 9 from conans.model.ref import PackageReference, ConanFileReference 10 from conans.paths import CONANINFO 11 from conans.util.log import logger 12 from conans.search.query_parse import infix_to_postfix, evaluate_postfix 13 from conans.util.files import list_folder_subdirs, load 14 15 16 def filter_outdated(packages_infos, recipe_hash): 17 result = {} 18 for package_id, info in packages_infos.items(): 19 try: # Existing package_info of old package might not have recipe_hash 20 if info["recipe_hash"] != recipe_hash: 21 result[package_id] = info 22 except KeyError: 23 pass 24 return result 25 26 27 def filter_packages(query, package_infos): 28 if query is None: 29 return package_infos 30 try: 31 if "!" in query: 32 raise ConanException("'!' character is not allowed") 33 if " not " in query or query.startswith("not "): 34 raise ConanException("'not' operator is not allowed") 35 postfix = infix_to_postfix(query) if query else [] 36 result = {} 37 for package_id, info in package_infos.items(): 38 if evaluate_postfix_with_info(postfix, info): 39 result[package_id] = info 40 return result 41 except Exception as exc: 42 raise ConanException("Invalid package query: %s. 
%s" % (query, exc)) 43 44 45 def evaluate_postfix_with_info(postfix, conan_vars_info): 46 47 # Evaluate conaninfo with the expression 48 49 def evaluate_info(expression): 50 """Receives an expression like compiler.version="12" 51 Uses conan_vars_info in the closure to evaluate it""" 52 name, value = expression.split("=", 1) 53 value = value.replace("\"", "") 54 return evaluate(name, value, conan_vars_info) 55 56 return evaluate_postfix(postfix, evaluate_info) 57 58 59 def evaluate(prop_name, prop_value, conan_vars_info): 60 """ 61 Evaluates a single prop_name, prop_value like "os", "Windows" against conan_vars_info.serialize_min() 62 """ 63 64 def compatible_prop(setting_value, prop_value): 65 return setting_value is None or prop_value == setting_value 66 67 info_settings = conan_vars_info.get("settings", []) 68 info_options = conan_vars_info.get("options", []) 69 70 if prop_name in ["os", "compiler", "arch", "build_type"] or prop_name.startswith("compiler."): 71 return compatible_prop(info_settings.get(prop_name, None), prop_value) 72 else: 73 return compatible_prop(info_options.get(prop_name, None), prop_value) 74 return False 75 76 77 def search_recipes(paths, pattern=None, ignorecase=True): 78 # Conan references in main storage 79 if pattern: 80 if isinstance(pattern, ConanFileReference): 81 pattern = str(pattern) 82 pattern = translate(pattern) 83 pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern) 84 85 subdirs = list_folder_subdirs(basedir=paths.store, level=4) 86 if not pattern: 87 return sorted([ConanFileReference(*folder.split("/")) for folder in subdirs]) 88 else: 89 ret = [] 90 for subdir in subdirs: 91 conan_ref = ConanFileReference(*subdir.split("/")) 92 if pattern: 93 if pattern.match(str(conan_ref)): 94 ret.append(conan_ref) 95 return sorted(ret) 96 97 98 def search_packages(paths, reference, query): 99 """ Return a dict like this: 100 101 {package_ID: {name: "OpenCV", 102 version: "2.14", 103 settings: {os: Windows}}} 104 param conan_ref: ConanFileReference object 105 """ 106 infos = _get_local_infos_min(paths, reference) 107 return filter_packages(query, infos) 108 109 110 def _get_local_infos_min(paths, reference): 111 result = {} 112 packages_path = paths.packages(reference) 113 subdirs = list_folder_subdirs(packages_path, level=1) 114 for package_id in subdirs: 115 # Read conaninfo 116 try: 117 package_reference = PackageReference(reference, package_id) 118 info_path = os.path.join(paths.package(package_reference, 119 short_paths=None), CONANINFO) 120 if not os.path.exists(info_path): 121 raise NotFoundException("") 122 conan_info_content = load(info_path) 123 conan_vars_info = ConanInfo.loads(conan_info_content).serialize_min() 124 result[package_id] = conan_vars_info 125 126 except Exception as exc: 127 logger.error("Package %s has no ConanInfo file" % str(package_reference)) 128 if str(exc): 129 logger.error(str(exc)) 130 131 return result 132 [end of conans/search/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/search/search.py b/conans/search/search.py --- a/conans/search/search.py +++ b/conans/search/search.py @@ -1,7 +1,6 @@ import re import os - from fnmatch import translate from conans.errors import ConanException, NotFoundException @@ -90,11 +89,27 @@ for subdir in subdirs: conan_ref = ConanFileReference(*subdir.split("/")) if pattern: - if pattern.match(str(conan_ref)): + if _partial_match(pattern, conan_ref): ret.append(conan_ref) + return sorted(ret) +def _partial_match(pattern, conan_ref): + """ + Finds if pattern matches any of partial sums of tokens of conan reference + """ + + tokens = str(conan_ref).replace('/', ' / ').replace('@', ' @ ').split() + + def partial_sums(iterable): + sum = '' + for i in iterable: + sum += i + yield sum + + return any(map(pattern.match, list(partial_sums(tokens)))) + def search_packages(paths, reference, query): """ Return a dict like this:
{"golden_diff": "diff --git a/conans/search/search.py b/conans/search/search.py\n--- a/conans/search/search.py\n+++ b/conans/search/search.py\n@@ -1,7 +1,6 @@\n import re\n import os\n \n-\n from fnmatch import translate\n \n from conans.errors import ConanException, NotFoundException\n@@ -90,11 +89,27 @@\n for subdir in subdirs:\n conan_ref = ConanFileReference(*subdir.split(\"/\"))\n if pattern:\n- if pattern.match(str(conan_ref)):\n+ if _partial_match(pattern, conan_ref):\n ret.append(conan_ref)\n+\n return sorted(ret)\n \n \n+def _partial_match(pattern, conan_ref):\n+ \"\"\"\n+ Finds if pattern matches any of partial sums of tokens of conan reference\n+ \"\"\"\n+ \n+ tokens = str(conan_ref).replace('/', ' / ').replace('@', ' @ ').split()\n+\n+ def partial_sums(iterable):\n+ sum = ''\n+ for i in iterable:\n+ sum += i\n+ yield sum\n+\n+ return any(map(pattern.match, list(partial_sums(tokens))))\n+\n def search_packages(paths, reference, query):\n \"\"\" Return a dict like this:\n", "issue": "Inconsistency between local and remote version of `conan search`\nDepending on searching either in remotes or locally we're getting different results for situations where we don't use wildcards. \r\nExample:\r\n```\r\n$ conan search zlib\r\nThere are no packages matching the 'zlib' pattern\r\n\r\n$ conan search zlib*\r\nExisting package recipes:\r\n\r\nzlib/1.2.8@conan/stable\r\nzlib/1.2.11@conan/stable\r\n```\r\n```\r\n$ conan search zlib -r conan-center\r\nExisting package recipes:\r\n\r\nzlib/1.2.8@conan/stable\r\nzlib/1.2.11@conan/stable\r\nzlib/1.2.11@conan/testing\r\n```\r\nSame for combinations such as `zlib/1.2.8`, `zlib/1.2.8@`, `zlib/1.2.8@conan`, `zlib/1.2.8@conan/` except for `zlib/`.\r\n\r\nProposition: make local search act in the same manner as remote search.\n", "before_files": [{"content": "import re\nimport os\n\n\nfrom fnmatch import translate\n\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.info import ConanInfo\nfrom conans.model.ref import PackageReference, ConanFileReference\nfrom conans.paths import CONANINFO\nfrom conans.util.log import logger\nfrom conans.search.query_parse import infix_to_postfix, evaluate_postfix\nfrom conans.util.files import list_folder_subdirs, load\n\n\ndef filter_outdated(packages_infos, recipe_hash):\n result = {}\n for package_id, info in packages_infos.items():\n try: # Existing package_info of old package might not have recipe_hash\n if info[\"recipe_hash\"] != recipe_hash:\n result[package_id] = info\n except KeyError:\n pass\n return result\n\n\ndef filter_packages(query, package_infos):\n if query is None:\n return package_infos\n try:\n if \"!\" in query:\n raise ConanException(\"'!' character is not allowed\")\n if \" not \" in query or query.startswith(\"not \"):\n raise ConanException(\"'not' operator is not allowed\")\n postfix = infix_to_postfix(query) if query else []\n result = {}\n for package_id, info in package_infos.items():\n if evaluate_postfix_with_info(postfix, info):\n result[package_id] = info\n return result\n except Exception as exc:\n raise ConanException(\"Invalid package query: %s. 
%s\" % (query, exc))\n\n\ndef evaluate_postfix_with_info(postfix, conan_vars_info):\n\n # Evaluate conaninfo with the expression\n\n def evaluate_info(expression):\n \"\"\"Receives an expression like compiler.version=\"12\"\n Uses conan_vars_info in the closure to evaluate it\"\"\"\n name, value = expression.split(\"=\", 1)\n value = value.replace(\"\\\"\", \"\")\n return evaluate(name, value, conan_vars_info)\n\n return evaluate_postfix(postfix, evaluate_info)\n\n\ndef evaluate(prop_name, prop_value, conan_vars_info):\n \"\"\"\n Evaluates a single prop_name, prop_value like \"os\", \"Windows\" against conan_vars_info.serialize_min()\n \"\"\"\n\n def compatible_prop(setting_value, prop_value):\n return setting_value is None or prop_value == setting_value\n\n info_settings = conan_vars_info.get(\"settings\", [])\n info_options = conan_vars_info.get(\"options\", [])\n\n if prop_name in [\"os\", \"compiler\", \"arch\", \"build_type\"] or prop_name.startswith(\"compiler.\"):\n return compatible_prop(info_settings.get(prop_name, None), prop_value)\n else:\n return compatible_prop(info_options.get(prop_name, None), prop_value)\n return False\n\n\ndef search_recipes(paths, pattern=None, ignorecase=True):\n # Conan references in main storage\n if pattern:\n if isinstance(pattern, ConanFileReference):\n pattern = str(pattern)\n pattern = translate(pattern)\n pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern)\n\n subdirs = list_folder_subdirs(basedir=paths.store, level=4)\n if not pattern:\n return sorted([ConanFileReference(*folder.split(\"/\")) for folder in subdirs])\n else:\n ret = []\n for subdir in subdirs:\n conan_ref = ConanFileReference(*subdir.split(\"/\"))\n if pattern:\n if pattern.match(str(conan_ref)):\n ret.append(conan_ref)\n return sorted(ret)\n\n\ndef search_packages(paths, reference, query):\n \"\"\" Return a dict like this:\n\n {package_ID: {name: \"OpenCV\",\n version: \"2.14\",\n settings: {os: Windows}}}\n param conan_ref: ConanFileReference object\n \"\"\"\n infos = _get_local_infos_min(paths, reference)\n return filter_packages(query, infos)\n\n\ndef _get_local_infos_min(paths, reference):\n result = {}\n packages_path = paths.packages(reference)\n subdirs = list_folder_subdirs(packages_path, level=1)\n for package_id in subdirs:\n # Read conaninfo\n try:\n package_reference = PackageReference(reference, package_id)\n info_path = os.path.join(paths.package(package_reference,\n short_paths=None), CONANINFO)\n if not os.path.exists(info_path):\n raise NotFoundException(\"\")\n conan_info_content = load(info_path)\n conan_vars_info = ConanInfo.loads(conan_info_content).serialize_min()\n result[package_id] = conan_vars_info\n\n except Exception as exc:\n logger.error(\"Package %s has no ConanInfo file\" % str(package_reference))\n if str(exc):\n logger.error(str(exc))\n\n return result\n", "path": "conans/search/search.py"}]}
2,075
265
gh_patches_debug_16835
rasdani/github-patches
git_diff
ESMCI__cime-538
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PET tests do not work on skybridge Skybridge insta-fails the single-threaded case because it tries to use 16 procs-per-node and the sbatch only requested 8 ppn. </issue> <code> [start of utils/python/CIME/SystemTests/pet.py] 1 """ 2 Implementation of the CIME PET test. This class inherits from SystemTestsCommon 3 4 This is an openmp test to determine that changing thread counts does not change answers. 5 (1) do an initial run where all components are threaded by default (suffix: base) 6 (2) do another initial run with nthrds=1 for all components (suffix: single_thread) 7 """ 8 9 from CIME.XML.standard_module_setup import * 10 from CIME.case_setup import case_setup 11 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo 12 13 logger = logging.getLogger(__name__) 14 15 class PET(SystemTestsCompareTwo): 16 17 _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND') 18 19 def __init__(self, case): 20 """ 21 initialize a test object 22 """ 23 SystemTestsCompareTwo.__init__(self, case, 24 separate_builds = False, 25 run_two_suffix = 'single_thread', 26 run_one_description = 'default threading', 27 run_two_description = 'threads set to 1') 28 29 def _case_one_setup(self): 30 # first make sure that all components have threaded settings 31 for comp in self._COMPONENT_LIST: 32 if self._case.get_value("NTHRDS_%s"%comp) <= 1: 33 self._case.set_value("NTHRDS_%s"%comp, 2) 34 35 # Need to redo case_setup because we may have changed the number of threads 36 case_setup(self._case, reset=True) 37 38 def _case_two_setup(self): 39 #Do a run with all threads set to 1 40 for comp in self._COMPONENT_LIST: 41 self._case.set_value("NTHRDS_%s"%comp, 1) 42 43 # Need to redo case_setup because we may have changed the number of threads 44 case_setup(self._case, reset=True) 45 [end of utils/python/CIME/SystemTests/pet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py --- a/utils/python/CIME/SystemTests/pet.py +++ b/utils/python/CIME/SystemTests/pet.py @@ -40,5 +40,14 @@ for comp in self._COMPONENT_LIST: self._case.set_value("NTHRDS_%s"%comp, 1) + # The need for this is subtle. On batch systems, the entire PET test runs + # under a single submission and that submission is configured based on + # the case settings for case 1, IE 2 threads for all components. This causes + # the procs-per-node to be half of what it would be for single thread. On some + # machines, if the mpiexec tries to exceed the procs-per-node that were given + # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of + # it original value prevents this. + self._case.set_value("MAX_TASKS_PER_NODE", self._case.get_value("MAX_TASKS_PER_NODE") / 2) + # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True)
{"golden_diff": "diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py\n--- a/utils/python/CIME/SystemTests/pet.py\n+++ b/utils/python/CIME/SystemTests/pet.py\n@@ -40,5 +40,14 @@\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n \n+ # The need for this is subtle. On batch systems, the entire PET test runs\n+ # under a single submission and that submission is configured based on\n+ # the case settings for case 1, IE 2 threads for all components. This causes\n+ # the procs-per-node to be half of what it would be for single thread. On some\n+ # machines, if the mpiexec tries to exceed the procs-per-node that were given\n+ # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of\n+ # it original value prevents this.\n+ self._case.set_value(\"MAX_TASKS_PER_NODE\", self._case.get_value(\"MAX_TASKS_PER_NODE\") / 2)\n+\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "issue": "PET tests do not work on skybridge\nSkybridge insta-fails the single-threaded case because it tries to use 16 procs-per-node and the sbatch only requested 8 ppn.\n\n", "before_files": [{"content": "\"\"\"\nImplementation of the CIME PET test. This class inherits from SystemTestsCommon\n\nThis is an openmp test to determine that changing thread counts does not change answers.\n(1) do an initial run where all components are threaded by default (suffix: base)\n(2) do another initial run with nthrds=1 for all components (suffix: single_thread)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n\nlogger = logging.getLogger(__name__)\n\nclass PET(SystemTestsCompareTwo):\n\n _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = False,\n run_two_suffix = 'single_thread',\n run_one_description = 'default threading',\n run_two_description = 'threads set to 1')\n\n def _case_one_setup(self):\n # first make sure that all components have threaded settings\n for comp in self._COMPONENT_LIST:\n if self._case.get_value(\"NTHRDS_%s\"%comp) <= 1:\n self._case.set_value(\"NTHRDS_%s\"%comp, 2)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n\n def _case_two_setup(self):\n #Do a run with all threads set to 1\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "path": "utils/python/CIME/SystemTests/pet.py"}]}
1,070
279
gh_patches_debug_53979
rasdani/github-patches
git_diff
scikit-hep__pyhf-1261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove duplicated libraries in setup.py # Description In `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`. https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47 already exists as a core requirement in `setup.cfg` https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45 and so should be removed from `setup.py`. It also isn't clear if https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42 is still required, given that it was added back in PR #186 when we still used Coveralls for coverage. </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'], 11 'xmlio': [ 12 'uproot3~=3.14', 13 'uproot~=4.0', 14 ], # uproot3 required until writing to ROOT supported in uproot4 15 'minuit': ['iminuit~=2.1'], 16 } 17 extras_require['backends'] = sorted( 18 set( 19 extras_require['tensorflow'] 20 + extras_require['torch'] 21 + extras_require['jax'] 22 + extras_require['minuit'] 23 ) 24 ) 25 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 26 extras_require['lint'] = sorted({'flake8', 'black'}) 27 28 extras_require['test'] = sorted( 29 set( 30 extras_require['backends'] 31 + extras_require['xmlio'] 32 + extras_require['contrib'] 33 + extras_require['shellcomplete'] 34 + [ 35 'pytest~=6.0', 36 'pytest-cov>=2.5.1', 37 'pytest-mock', 38 'pytest-benchmark[histogram]', 39 'pytest-console-scripts', 40 'pytest-mpl', 41 'pydocstyle', 42 'coverage>=4.0', # coveralls 43 'papermill~=2.0', 44 'nteract-scrapbook~=0.2', 45 'jupyter', 46 'graphviz', 47 'jsonpatch', 48 ] 49 ) 50 ) 51 extras_require['docs'] = sorted( 52 set( 53 extras_require['xmlio'] 54 + [ 55 'sphinx>=3.1.2', 56 'sphinxcontrib-bibtex~=2.1', 57 'sphinx-click', 58 'sphinx_rtd_theme', 59 'nbsphinx', 60 'ipywidgets', 61 'sphinx-issues', 62 'sphinx-copybutton>0.2.9', 63 ] 64 ) 65 ) 66 extras_require['develop'] = sorted( 67 set( 68 extras_require['docs'] 69 + extras_require['lint'] 70 + extras_require['test'] 71 + [ 72 'nbdime', 73 'bump2version', 74 'ipython', 75 'pre-commit', 76 'check-manifest', 77 'codemetapy>=0.3.4', 78 'twine', 79 ] 80 ) 81 ) 82 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 83 84 85 setup( 86 extras_require=extras_require, 87 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 88 ) 89 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -39,12 +39,10 @@ 'pytest-console-scripts', 'pytest-mpl', 'pydocstyle', - 'coverage>=4.0', # coveralls 'papermill~=2.0', 'nteract-scrapbook~=0.2', 'jupyter', 'graphviz', - 'jsonpatch', ] ) )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,12 +39,10 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n- 'jsonpatch',\n ]\n )\n )\n", "issue": "Remove duplicated libraries in setup.py\n# Description\r\n\r\nIn `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47\r\n\r\nalready exists as a core requirement in `setup.cfg`\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45\r\n\r\nand so should be removed from `setup.py`.\r\n\r\nIt also isn't clear if \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42\r\n\r\nis still required, given that it was added back in PR #186 when we still used Coveralls for coverage.\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,553
num_tokens_diff: 109
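The fix above only deletes two entries from the `test` extra in `setup.py`, because the same packages already sit in `setup.cfg`'s core requirements. A small self-contained sketch of how such overlaps can be spotted with the standard library; the `setup.cfg` text and the extras list below are stand-ins, not pyhf's actual files:

```python
import configparser

# Stand-in config text; a real project would read the file from disk instead.
SETUP_CFG = """\
[options]
install_requires =
    jsonpatch
    pyyaml
    tqdm
"""

test_extra = ["pytest", "pytest-cov", "coverage", "jsonpatch", "papermill"]

cfg = configparser.ConfigParser()
cfg.read_string(SETUP_CFG)

# setup.cfg stores install_requires as one newline-separated string.
core = {line.strip() for line in cfg["options"]["install_requires"].splitlines() if line.strip()}

# Anything listed both as a core requirement and inside an extra is redundant in the extra.
print(sorted(core & set(test_extra)))  # ['jsonpatch']
```

Real requirement lines often carry version specifiers (for example `coverage>=4.0`), so an exact-string comparison like this is only a first pass; a proper check would parse out the distribution names before comparing.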
problem_id: gh_patches_debug_28166
source: rasdani/github-patches
task_type: git_diff
in_source_id: svthalia__concrexit-1818
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add payment_type or full payment to event admin API ### Motivation `api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to display in the admin screen how it was paid, the payment must be requested separately. Doing this for all of the registrations would be very inefficient (like 40 extra requests to load the event admin). If we simply add the payment_type or replace the payment uuid with a payment serializer, it will be much simpler. </issue> <code> [start of website/events/api/v2/serializers/event_registration.py] 1 from rest_framework import serializers 2 3 from events.models import EventRegistration 4 from members.api.v2.serializers.member import MemberSerializer 5 6 7 class EventRegistrationSerializer(serializers.ModelSerializer): 8 """Serializer for event registrations.""" 9 10 def __init__(self, *args, **kwargs): 11 # Don't pass the 'fields' arg up to the superclass 12 fields = kwargs.pop("fields", {"pk", "member", "name"}) 13 14 # Instantiate the superclass normally 15 super().__init__(*args, **kwargs) 16 17 allowed = set(fields) 18 existing = set(self.fields.keys()) 19 for field_name in existing - allowed: 20 self.fields.pop(field_name) 21 22 class Meta: 23 model = EventRegistration 24 fields = ( 25 "pk", 26 "present", 27 "queue_position", 28 "date", 29 "payment", 30 "member", 31 "name", 32 ) 33 34 member = MemberSerializer(detailed=False, read_only=True) 35 [end of website/events/api/v2/serializers/event_registration.py] [start of website/events/api/v2/admin/serializers/event_registration.py] 1 from rest_framework import serializers 2 3 from events.models import EventRegistration 4 from members.api.v2.serializers.member import MemberSerializer 5 from members.models import Member 6 7 8 class EventRegistrationAdminSerializer(serializers.ModelSerializer): 9 """Serializer for event registrations.""" 10 11 class Meta: 12 model = EventRegistration 13 fields = ( 14 "pk", 15 "present", 16 "queue_position", 17 "date", 18 "date_cancelled", 19 "payment", 20 "member", 21 "name", 22 ) 23 read_only_fields = ("payment",) 24 25 def to_internal_value(self, data): 26 self.fields["member"] = serializers.PrimaryKeyRelatedField( 27 queryset=Member.objects.all() 28 ) 29 return super().to_internal_value(data) 30 31 def to_representation(self, instance): 32 self.fields["member"] = MemberSerializer(detailed=False, read_only=True) 33 return super().to_representation(instance) 34 [end of website/events/api/v2/admin/serializers/event_registration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py --- a/website/events/api/v2/admin/serializers/event_registration.py +++ b/website/events/api/v2/admin/serializers/event_registration.py @@ -3,6 +3,7 @@ from events.models import EventRegistration from members.api.v2.serializers.member import MemberSerializer from members.models import Member +from payments.api.v2.serializers import PaymentSerializer class EventRegistrationAdminSerializer(serializers.ModelSerializer): @@ -22,6 +23,8 @@ ) read_only_fields = ("payment",) + payment = PaymentSerializer() + def to_internal_value(self, data): self.fields["member"] = serializers.PrimaryKeyRelatedField( queryset=Member.objects.all() diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py --- a/website/events/api/v2/serializers/event_registration.py +++ b/website/events/api/v2/serializers/event_registration.py @@ -2,6 +2,7 @@ from events.models import EventRegistration from members.api.v2.serializers.member import MemberSerializer +from payments.api.v2.serializers import PaymentSerializer class EventRegistrationSerializer(serializers.ModelSerializer): @@ -31,4 +32,5 @@ "name", ) + payment = PaymentSerializer() member = MemberSerializer(detailed=False, read_only=True)
{"golden_diff": "diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py\n--- a/website/events/api/v2/admin/serializers/event_registration.py\n+++ b/website/events/api/v2/admin/serializers/event_registration.py\n@@ -3,6 +3,7 @@\n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n from members.models import Member\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationAdminSerializer(serializers.ModelSerializer):\n@@ -22,6 +23,8 @@\n )\n read_only_fields = (\"payment\",)\n \n+ payment = PaymentSerializer()\n+\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\ndiff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -2,6 +2,7 @@\n \n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationSerializer(serializers.ModelSerializer):\n@@ -31,4 +32,5 @@\n \"name\",\n )\n \n+ payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n", "issue": "Add payment_type or full payment to event admin API\n### Motivation\r\n`api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to display in the admin screen how it was paid, the payment must be requested separately. Doing this for all of the registrations would be very inefficient (like 40 extra requests to load the event admin). 
If we simply add the payment_type or replace the payment uuid with a payment serializer, it will be much simpler.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n member = MemberSerializer(detailed=False, read_only=True)\n", "path": "website/events/api/v2/serializers/event_registration.py"}, {"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom members.models import Member\n\n\nclass EventRegistrationAdminSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"date_cancelled\",\n \"payment\",\n \"member\",\n \"name\",\n )\n read_only_fields = (\"payment\",)\n\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\n )\n return super().to_internal_value(data)\n\n def to_representation(self, instance):\n self.fields[\"member\"] = MemberSerializer(detailed=False, read_only=True)\n return super().to_representation(instance)\n", "path": "website/events/api/v2/admin/serializers/event_registration.py"}]}
num_tokens_prompt: 1,185
num_tokens_diff: 329
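The diff above swaps the primary-key `payment` field for a nested `PaymentSerializer`, so the admin endpoint returns the payment itself instead of a UUID that must be fetched separately. A plain-Python sketch of the payload shape before and after; the member and payment fields shown here are hypothetical placeholders, not concrexit's actual schema:

```python
# Before: only a reference, so learning how each registration was paid costs
# one extra request per registration.
registration_before = {
    "pk": 17,
    "member": {"pk": 3, "display_name": "Jane Doe"},
    "payment": "4cbe6f9a-2f54-4b8b-9a39-0d6a2c3f1e11",
}

# After: the payment is embedded, so the payment type (and anything else the
# serializer exposes) arrives in the same response.
registration_after = {
    "pk": 17,
    "member": {"pk": 3, "display_name": "Jane Doe"},
    "payment": {
        "pk": "4cbe6f9a-2f54-4b8b-9a39-0d6a2c3f1e11",
        "type": "card_payment",   # illustrative field
        "amount": "7.50",         # illustrative field
    },
}

# The admin list view can now answer "how was this paid?" without extra round trips.
print(registration_after["payment"]["type"])
```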
problem_id: gh_patches_debug_28466
source: rasdani/github-patches
task_type: git_diff
in_source_id: scalableminds__webknossos-libs-598
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Warn when multiprocessing fails due to missing if __name__ guard If wklibs are used with multiprocessing (e.g., calling `downsample` on a layer), python's multiprocessing module will import the main module (if `spawn` is used which is the default on OS X). If that module has side effects (i.e., it is not guarded with `if __name__ == "__main__":`), weird errors can occur. On start up, we could spawn a dummy job with the clustertools and if that fails, we could emit ``` raise AssertionError( """ ######################################### ######################################### ######################################### ######################################### ######################################### ## Multiprocessing setup does not work ## ## properly. Please check that you are ## ## using an if __name__ == "__main__" ## ## guard in your main module. ## ######################################### ######################################### ######################################### ######################################### ######################################### """ ) ``` to help the user with what's wrong. </issue> <code> [start of cluster_tools/cluster_tools/__init__.py] 1 import logging 2 import multiprocessing 3 import os 4 import shutil 5 import tempfile 6 from concurrent import futures 7 from concurrent.futures import ProcessPoolExecutor 8 from functools import partial 9 from pathlib import Path 10 11 from . import pickling 12 from .multiprocessing_logging_handler import get_multiprocessing_logging_setup_fn 13 from .schedulers.pbs import PBSExecutor 14 from .schedulers.slurm import SlurmExecutor 15 from .util import enrich_future_with_uncaught_warning 16 17 18 def get_existent_kwargs_subset(whitelist, kwargs): 19 new_kwargs = {} 20 for arg_name in whitelist: 21 if arg_name in kwargs: 22 new_kwargs[arg_name] = kwargs[arg_name] 23 24 return new_kwargs 25 26 27 PROCESS_POOL_KWARGS_WHITELIST = ["max_workers", "mp_context", "initializer", "initargs"] 28 29 30 class WrappedProcessPoolExecutor(ProcessPoolExecutor): 31 """ 32 Wraps the ProcessPoolExecutor to add various features: 33 - map_to_futures and map_unordered method 34 - pickling of job's output (see output_pickle_path_getter and output_pickle_path) 35 - job submission via pickling to circumvent bug in python < 3.8 (see MULTIPROCESSING_VIA_IO_TMP_DIR) 36 """ 37 38 def __init__(self, **kwargs): 39 new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs) 40 41 self.did_overwrite_start_method = False 42 if kwargs.get("start_method", None) is not None: 43 self.did_overwrite_start_method = True 44 self.old_start_method = multiprocessing.get_start_method() 45 start_method = kwargs["start_method"] 46 logging.info( 47 f"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}" 48 ) 49 multiprocessing.set_start_method(start_method, force=True) 50 51 ProcessPoolExecutor.__init__(self, **new_kwargs) 52 53 def shutdown(self, *args, **kwargs): 54 55 super().shutdown(*args, **kwargs) 56 57 if self.did_overwrite_start_method: 58 logging.info( 59 f"Restoring start_method to original value: {self.old_start_method}." 
60 ) 61 multiprocessing.set_start_method(self.old_start_method, force=True) 62 self.old_start_method = None 63 self.did_overwrite_start_method = False 64 65 def submit(self, *args, **kwargs): 66 67 output_pickle_path = None 68 if "__cfut_options" in kwargs: 69 output_pickle_path = kwargs["__cfut_options"]["output_pickle_path"] 70 del kwargs["__cfut_options"] 71 72 if os.environ.get("MULTIPROCESSING_VIA_IO"): 73 # If MULTIPROCESSING_VIA_IO is set, _submit_via_io is used to 74 # workaround size constraints in pythons multiprocessing 75 # implementation. Also see https://github.com/python/cpython/pull/10305/files 76 # This should be fixed in python 3.8 77 submit_fn = self._submit_via_io 78 else: 79 submit_fn = super().submit 80 81 # Depending on the start_method and output_pickle_path, wrapper functions may need to be 82 # executed in the new process context, before the actual code is ran. 83 # These wrapper functions consume their arguments from *args, **kwargs and assume 84 # that the next argument will be another function that is then called. 85 # The call_stack holds all of these wrapper functions and their arguments in the correct order. 86 # For example, call_stack = [wrapper_fn_1, wrapper_fn_1_arg_1, wrapper_fn_2, actual_fn, actual_fn_arg_1] 87 # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn. 88 call_stack = [] 89 90 if multiprocessing.get_start_method() != "fork": 91 # If a start_method other than the default "fork" is used, logging needs to be re-setup, 92 # because the programming context is not inherited in those cases. 93 multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn() 94 call_stack.extend( 95 [ 96 WrappedProcessPoolExecutor._setup_logging_and_execute, 97 multiprocessing_logging_setup_fn, 98 ] 99 ) 100 101 if output_pickle_path is not None: 102 call_stack.extend( 103 [ 104 WrappedProcessPoolExecutor._execute_and_persist_function, 105 output_pickle_path, 106 ] 107 ) 108 109 fut = submit_fn(*call_stack, *args, **kwargs) 110 111 enrich_future_with_uncaught_warning(fut) 112 return fut 113 114 def _submit_via_io(self, *args, **kwargs): 115 116 func = args[0] 117 args = args[1:] 118 119 opt_tmp_dir = os.environ.get("MULTIPROCESSING_VIA_IO_TMP_DIR") 120 if opt_tmp_dir is not None: 121 dirpath = tempfile.mkdtemp(dir=opt_tmp_dir) 122 else: 123 dirpath = tempfile.mkdtemp() 124 125 output_pickle_path = Path(dirpath) / "jobdescription.pickle" 126 127 with open(output_pickle_path, "wb") as file: 128 pickling.dump((func, args, kwargs), file) 129 130 future = super().submit( 131 WrappedProcessPoolExecutor._execute_via_io, output_pickle_path 132 ) 133 134 future.add_done_callback( 135 partial(WrappedProcessPoolExecutor._remove_tmp_file, dirpath) 136 ) 137 138 return future 139 140 @staticmethod 141 def _remove_tmp_file(path, _future): 142 143 shutil.rmtree(path) 144 145 @staticmethod 146 def _setup_logging_and_execute(multiprocessing_logging_setup_fn, *args, **kwargs): 147 148 func = args[0] 149 args = args[1:] 150 151 multiprocessing_logging_setup_fn() 152 153 return func(*args, **kwargs) 154 155 @staticmethod 156 def _execute_via_io(serialized_function_info_path): 157 158 with open(serialized_function_info_path, "rb") as file: 159 (func, args, kwargs) = pickling.load(file) 160 return func(*args, **kwargs) 161 162 @staticmethod 163 def _execute_and_persist_function(output_pickle_path, *args, **kwargs): 164 165 func = args[0] 166 args = args[1:] 167 168 result = func(*args, **kwargs) 169 170 with 
open(output_pickle_path, "wb") as file: 171 pickling.dump(result, file) 172 173 return result 174 175 def map_unordered(self, func, args): 176 177 futs = self.map_to_futures(func, args) 178 179 # Return a separate generator to avoid that map_unordered 180 # is executed lazily (otherwise, jobs would be submitted 181 # lazily, as well). 182 def result_generator(): 183 for fut in futures.as_completed(futs): 184 yield fut.result() 185 186 return result_generator() 187 188 def map_to_futures(self, func, args, output_pickle_path_getter=None): 189 190 if output_pickle_path_getter is not None: 191 futs = [ 192 self.submit( 193 func, 194 arg, 195 __cfut_options={ 196 "output_pickle_path": output_pickle_path_getter(arg) 197 }, 198 ) 199 for arg in args 200 ] 201 else: 202 futs = [self.submit(func, arg) for arg in args] 203 204 return futs 205 206 def forward_log(self, fut): 207 """ 208 Similar to the cluster executor, this method Takes a future from which the log file is forwarded to the active 209 process. This method blocks as long as the future is not done. 210 """ 211 212 # Since the default behavior of process pool executors is to show the log in the main process 213 # we don't need to do anything except for blocking until the future is done. 214 return fut.result() 215 216 217 class SequentialExecutor(WrappedProcessPoolExecutor): 218 """ 219 The same as WrappedProcessPoolExecutor, but always uses only one core. In essence, 220 this is a sequential executor approach, but it still makes use of the standard pool approach. 221 That way, switching between different executors should always work without any problems. 222 """ 223 224 def __init__(self, **kwargs): 225 kwargs["max_workers"] = 1 226 WrappedProcessPoolExecutor.__init__(self, **kwargs) 227 228 229 class DebugSequentialExecutor(SequentialExecutor): 230 """ 231 Only use for debugging purposes. This executor does not spawn new processes for its jobs. Therefore, 232 setting breakpoint()'s should be possible without context-related problems. 233 """ 234 235 def submit(self, *args, **kwargs): 236 237 output_pickle_path = None 238 if "__cfut_options" in kwargs: 239 output_pickle_path = kwargs["__cfut_options"]["output_pickle_path"] 240 del kwargs["__cfut_options"] 241 242 if output_pickle_path is not None: 243 fut = self._blocking_submit( 244 WrappedProcessPoolExecutor._execute_and_persist_function, 245 output_pickle_path, 246 *args, 247 **kwargs, 248 ) 249 else: 250 fut = self._blocking_submit(*args, **kwargs) 251 252 enrich_future_with_uncaught_warning(fut) 253 return fut 254 255 def _blocking_submit(self, *args, **kwargs): 256 257 func = args[0] 258 args = args[1:] 259 260 fut = futures.Future() 261 result = func(*args, **kwargs) 262 fut.set_result(result) 263 264 return fut 265 266 267 def pickle_identity(obj): 268 return pickling.loads(pickling.dumps(obj)) 269 270 271 def pickle_identity_executor(func, *args, **kwargs): 272 result = func(*args, **kwargs) 273 return pickle_identity(result) 274 275 276 class PickleExecutor(WrappedProcessPoolExecutor): 277 """ 278 The same as WrappedProcessPoolExecutor, but always pickles input and output of the jobs. 279 When using this executor for automated tests, it is ensured that using cluster executors in production 280 won't provoke pickling-related problems. 
281 """ 282 283 def submit(self, _func, *_args, **_kwargs): 284 285 (func, args, kwargs) = pickle_identity((_func, _args, _kwargs)) 286 return super().submit(pickle_identity_executor, func, *args, **kwargs) 287 288 289 def get_executor(environment, **kwargs): 290 if environment == "slurm": 291 return SlurmExecutor(**kwargs) 292 elif environment == "pbs": 293 return PBSExecutor(**kwargs) 294 elif environment == "multiprocessing": 295 return WrappedProcessPoolExecutor(**kwargs) 296 elif environment == "sequential": 297 return SequentialExecutor(**kwargs) 298 elif environment == "debug_sequential": 299 return DebugSequentialExecutor(**kwargs) 300 elif environment == "test_pickling": 301 return PickleExecutor(**kwargs) 302 raise Exception("Unknown executor: {}".format(environment)) 303 [end of cluster_tools/cluster_tools/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cluster_tools/cluster_tools/__init__.py b/cluster_tools/cluster_tools/__init__.py --- a/cluster_tools/cluster_tools/__init__.py +++ b/cluster_tools/cluster_tools/__init__.py @@ -286,12 +286,52 @@ return super().submit(pickle_identity_executor, func, *args, **kwargs) +def noop(): + return True + + +did_start_test_multiprocessing = False + + +def test_valid_multiprocessing(): + + msg = """ + ############################################################### + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + main() + ... + ############################################################### + """ + + with get_executor("multiprocessing") as executor: + try: + res_fut = executor.submit(noop) + assert res_fut.result() == True, msg + except RuntimeError as exc: + raise Exception(msg) from exc + except EOFError as exc: + raise Exception(msg) from exc + + def get_executor(environment, **kwargs): + if environment == "slurm": return SlurmExecutor(**kwargs) elif environment == "pbs": return PBSExecutor(**kwargs) elif environment == "multiprocessing": + global did_start_test_multiprocessing + if not did_start_test_multiprocessing: + did_start_test_multiprocessing = True + test_valid_multiprocessing() + return WrappedProcessPoolExecutor(**kwargs) elif environment == "sequential": return SequentialExecutor(**kwargs)
{"golden_diff": "diff --git a/cluster_tools/cluster_tools/__init__.py b/cluster_tools/cluster_tools/__init__.py\n--- a/cluster_tools/cluster_tools/__init__.py\n+++ b/cluster_tools/cluster_tools/__init__.py\n@@ -286,12 +286,52 @@\n return super().submit(pickle_identity_executor, func, *args, **kwargs)\n \n \n+def noop():\n+ return True\n+\n+\n+did_start_test_multiprocessing = False\n+\n+\n+def test_valid_multiprocessing():\n+\n+ msg = \"\"\"\n+ ###############################################################\n+ An attempt has been made to start a new process before the\n+ current process has finished its bootstrapping phase.\n+\n+ This probably means that you are not using fork to start your\n+ child processes and you have forgotten to use the proper idiom\n+ in the main module:\n+\n+ if __name__ == '__main__':\n+ main()\n+ ...\n+ ###############################################################\n+ \"\"\"\n+\n+ with get_executor(\"multiprocessing\") as executor:\n+ try:\n+ res_fut = executor.submit(noop)\n+ assert res_fut.result() == True, msg\n+ except RuntimeError as exc:\n+ raise Exception(msg) from exc\n+ except EOFError as exc:\n+ raise Exception(msg) from exc\n+\n+\n def get_executor(environment, **kwargs):\n+\n if environment == \"slurm\":\n return SlurmExecutor(**kwargs)\n elif environment == \"pbs\":\n return PBSExecutor(**kwargs)\n elif environment == \"multiprocessing\":\n+ global did_start_test_multiprocessing\n+ if not did_start_test_multiprocessing:\n+ did_start_test_multiprocessing = True\n+ test_valid_multiprocessing()\n+\n return WrappedProcessPoolExecutor(**kwargs)\n elif environment == \"sequential\":\n return SequentialExecutor(**kwargs)\n", "issue": "Warn when multiprocessing fails due to missing if __name__ guard\nIf wklibs are used with multiprocessing (e.g., calling `downsample` on a layer), python's multiprocessing module will import the main module (if `spawn` is used which is the default on OS X). If that module has side effects (i.e., it is not guarded with `if __name__ == \"__main__\":`), weird errors can occur.\r\n\r\nOn start up, we could spawn a dummy job with the clustertools and if that fails, we could emit\r\n\r\n```\r\n raise AssertionError(\r\n \"\"\"\r\n #########################################\r\n #########################################\r\n #########################################\r\n #########################################\r\n #########################################\r\n ## Multiprocessing setup does not work ##\r\n ## properly. Please check that you are ##\r\n ## using an if __name__ == \"__main__\" ##\r\n ## guard in your main module. ##\r\n #########################################\r\n #########################################\r\n #########################################\r\n #########################################\r\n #########################################\r\n \"\"\"\r\n ) \r\n```\r\n\r\nto help the user with what's wrong.\n", "before_files": [{"content": "import logging\nimport multiprocessing\nimport os\nimport shutil\nimport tempfile\nfrom concurrent import futures\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom pathlib import Path\n\nfrom . 
import pickling\nfrom .multiprocessing_logging_handler import get_multiprocessing_logging_setup_fn\nfrom .schedulers.pbs import PBSExecutor\nfrom .schedulers.slurm import SlurmExecutor\nfrom .util import enrich_future_with_uncaught_warning\n\n\ndef get_existent_kwargs_subset(whitelist, kwargs):\n new_kwargs = {}\n for arg_name in whitelist:\n if arg_name in kwargs:\n new_kwargs[arg_name] = kwargs[arg_name]\n\n return new_kwargs\n\n\nPROCESS_POOL_KWARGS_WHITELIST = [\"max_workers\", \"mp_context\", \"initializer\", \"initargs\"]\n\n\nclass WrappedProcessPoolExecutor(ProcessPoolExecutor):\n \"\"\"\n Wraps the ProcessPoolExecutor to add various features:\n - map_to_futures and map_unordered method\n - pickling of job's output (see output_pickle_path_getter and output_pickle_path)\n - job submission via pickling to circumvent bug in python < 3.8 (see MULTIPROCESSING_VIA_IO_TMP_DIR)\n \"\"\"\n\n def __init__(self, **kwargs):\n new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)\n\n self.did_overwrite_start_method = False\n if kwargs.get(\"start_method\", None) is not None:\n self.did_overwrite_start_method = True\n self.old_start_method = multiprocessing.get_start_method()\n start_method = kwargs[\"start_method\"]\n logging.info(\n f\"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}\"\n )\n multiprocessing.set_start_method(start_method, force=True)\n\n ProcessPoolExecutor.__init__(self, **new_kwargs)\n\n def shutdown(self, *args, **kwargs):\n\n super().shutdown(*args, **kwargs)\n\n if self.did_overwrite_start_method:\n logging.info(\n f\"Restoring start_method to original value: {self.old_start_method}.\"\n )\n multiprocessing.set_start_method(self.old_start_method, force=True)\n self.old_start_method = None\n self.did_overwrite_start_method = False\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if os.environ.get(\"MULTIPROCESSING_VIA_IO\"):\n # If MULTIPROCESSING_VIA_IO is set, _submit_via_io is used to\n # workaround size constraints in pythons multiprocessing\n # implementation. 
Also see https://github.com/python/cpython/pull/10305/files\n # This should be fixed in python 3.8\n submit_fn = self._submit_via_io\n else:\n submit_fn = super().submit\n\n # Depending on the start_method and output_pickle_path, wrapper functions may need to be\n # executed in the new process context, before the actual code is ran.\n # These wrapper functions consume their arguments from *args, **kwargs and assume\n # that the next argument will be another function that is then called.\n # The call_stack holds all of these wrapper functions and their arguments in the correct order.\n # For example, call_stack = [wrapper_fn_1, wrapper_fn_1_arg_1, wrapper_fn_2, actual_fn, actual_fn_arg_1]\n # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.\n call_stack = []\n\n if multiprocessing.get_start_method() != \"fork\":\n # If a start_method other than the default \"fork\" is used, logging needs to be re-setup,\n # because the programming context is not inherited in those cases.\n multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._setup_logging_and_execute,\n multiprocessing_logging_setup_fn,\n ]\n )\n\n if output_pickle_path is not None:\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n ]\n )\n\n fut = submit_fn(*call_stack, *args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _submit_via_io(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n opt_tmp_dir = os.environ.get(\"MULTIPROCESSING_VIA_IO_TMP_DIR\")\n if opt_tmp_dir is not None:\n dirpath = tempfile.mkdtemp(dir=opt_tmp_dir)\n else:\n dirpath = tempfile.mkdtemp()\n\n output_pickle_path = Path(dirpath) / \"jobdescription.pickle\"\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump((func, args, kwargs), file)\n\n future = super().submit(\n WrappedProcessPoolExecutor._execute_via_io, output_pickle_path\n )\n\n future.add_done_callback(\n partial(WrappedProcessPoolExecutor._remove_tmp_file, dirpath)\n )\n\n return future\n\n @staticmethod\n def _remove_tmp_file(path, _future):\n\n shutil.rmtree(path)\n\n @staticmethod\n def _setup_logging_and_execute(multiprocessing_logging_setup_fn, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n multiprocessing_logging_setup_fn()\n\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_via_io(serialized_function_info_path):\n\n with open(serialized_function_info_path, \"rb\") as file:\n (func, args, kwargs) = pickling.load(file)\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_and_persist_function(output_pickle_path, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n result = func(*args, **kwargs)\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump(result, file)\n\n return result\n\n def map_unordered(self, func, args):\n\n futs = self.map_to_futures(func, args)\n\n # Return a separate generator to avoid that map_unordered\n # is executed lazily (otherwise, jobs would be submitted\n # lazily, as well).\n def result_generator():\n for fut in futures.as_completed(futs):\n yield fut.result()\n\n return result_generator()\n\n def map_to_futures(self, func, args, output_pickle_path_getter=None):\n\n if output_pickle_path_getter is not None:\n futs = [\n self.submit(\n func,\n arg,\n __cfut_options={\n \"output_pickle_path\": output_pickle_path_getter(arg)\n },\n )\n for arg in args\n ]\n else:\n futs = 
[self.submit(func, arg) for arg in args]\n\n return futs\n\n def forward_log(self, fut):\n \"\"\"\n Similar to the cluster executor, this method Takes a future from which the log file is forwarded to the active\n process. This method blocks as long as the future is not done.\n \"\"\"\n\n # Since the default behavior of process pool executors is to show the log in the main process\n # we don't need to do anything except for blocking until the future is done.\n return fut.result()\n\n\nclass SequentialExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always uses only one core. In essence,\n this is a sequential executor approach, but it still makes use of the standard pool approach.\n That way, switching between different executors should always work without any problems.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs[\"max_workers\"] = 1\n WrappedProcessPoolExecutor.__init__(self, **kwargs)\n\n\nclass DebugSequentialExecutor(SequentialExecutor):\n \"\"\"\n Only use for debugging purposes. This executor does not spawn new processes for its jobs. Therefore,\n setting breakpoint()'s should be possible without context-related problems.\n \"\"\"\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if output_pickle_path is not None:\n fut = self._blocking_submit(\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n *args,\n **kwargs,\n )\n else:\n fut = self._blocking_submit(*args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _blocking_submit(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n fut = futures.Future()\n result = func(*args, **kwargs)\n fut.set_result(result)\n\n return fut\n\n\ndef pickle_identity(obj):\n return pickling.loads(pickling.dumps(obj))\n\n\ndef pickle_identity_executor(func, *args, **kwargs):\n result = func(*args, **kwargs)\n return pickle_identity(result)\n\n\nclass PickleExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always pickles input and output of the jobs.\n When using this executor for automated tests, it is ensured that using cluster executors in production\n won't provoke pickling-related problems.\n \"\"\"\n\n def submit(self, _func, *_args, **_kwargs):\n\n (func, args, kwargs) = pickle_identity((_func, _args, _kwargs))\n return super().submit(pickle_identity_executor, func, *args, **kwargs)\n\n\ndef get_executor(environment, **kwargs):\n if environment == \"slurm\":\n return SlurmExecutor(**kwargs)\n elif environment == \"pbs\":\n return PBSExecutor(**kwargs)\n elif environment == \"multiprocessing\":\n return WrappedProcessPoolExecutor(**kwargs)\n elif environment == \"sequential\":\n return SequentialExecutor(**kwargs)\n elif environment == \"debug_sequential\":\n return DebugSequentialExecutor(**kwargs)\n elif environment == \"test_pickling\":\n return PickleExecutor(**kwargs)\n raise Exception(\"Unknown executor: {}\".format(environment))\n", "path": "cluster_tools/cluster_tools/__init__.py"}]}
num_tokens_prompt: 3,830
num_tokens_diff: 401
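The patch above probes the multiprocessing executor once with a no-op job and turns the usual `RuntimeError`/`EOFError` into a message pointing at the missing `if __name__ == "__main__":` guard. A standalone sketch of the same idea using only the standard library; the function names are mine, this is not the cluster_tools implementation, and it assumes the "spawn" start method that macOS and Windows default to:

```python
import concurrent.futures
import multiprocessing


def _noop() -> bool:
    # Must live at module level so "spawn" children can import and find it.
    return True


def check_multiprocessing_works() -> None:
    """Submit one trivial job; translate the typical bootstrap failure into a hint."""
    ctx = multiprocessing.get_context("spawn")
    try:
        with concurrent.futures.ProcessPoolExecutor(max_workers=1, mp_context=ctx) as pool:
            assert pool.submit(_noop).result() is True
    except (RuntimeError, EOFError) as exc:
        raise RuntimeError(
            "Multiprocessing setup does not work properly: guard your main module "
            "with `if __name__ == '__main__':` before spawning child processes."
        ) from exc


if __name__ == "__main__":
    # The guard is the point: without it, each spawned child re-imports the main
    # module, re-runs this block, and the bootstrap fails.
    check_multiprocessing_works()
    print("multiprocessing OK")
```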
problem_id: gh_patches_debug_15153
source: rasdani/github-patches
task_type: git_diff
in_source_id: arviz-devs__arviz-1615
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add example to az.plot_hdi that shows how to plot hdi from InferenceData posterior or posterior predictive ## Tell us about it The `az.plot_hdi` docs only show how to plot the hdi from synthetic data. What would be nice is if it also shows users how to plot the hdi from an `az.InferenceData` as thats the standard data representation most people will be working with. ![image](https://user-images.githubusercontent.com/7213793/103111300-5d0cdb00-4600-11eb-973e-3c710b4a63cc.png) ## Thoughts on implementation Use one of the precomputed datasets that has a posterior predictive group and create a plot. Here's an example of loading a dataset with posterior predictive group https://arviz-devs.github.io/arviz/api/generated/arviz.plot_ppc.html Here's an (unrelated) example where PyMC3 generates a trace and posterior predictive group and its used with `az.plot_hdi` ``` with pm.Model() as model_linear: β = pm.Normal('β', sd=10, shape=2) μ = pm.Deterministic('μ', pm.math.dot(babies[["Intercept", "Month"]], β)) ϵ = pm.HalfNormal('ϵ', sd=10) length = pm.Normal('length', mu=μ, sd=ϵ, observed=babies.Length) linear_trace = pm.sample(2000, tune=4000) linear_ppc = pm.sample_posterior_predictive(inf_data_linear) inf_data_linear = az.from_pymc3(trace=linear_trace, posterior_predictive= linear_ppc) fig, ax = plt.subplots() ax.set_ylabel("Length") ax.set_xlabel("Month"); μ_m = inf_data_linear.posterior["μ"].values.reshape(-1, babies.Length.shape[0]).mean(axis=0) ax.plot(babies.Month, μ_m, c='C4') az.plot_hdi(babies.Month, inf_data_linear.posterior_predictive["length"], hdi_prob=.94, ax=ax) ax.plot(babies.Month, babies.Length, 'C0.', alpha=0.1) plt.savefig('img/Baby_Length_Linear_Fit.png', dpi=300) ``` ![image](https://user-images.githubusercontent.com/7213793/103111469-39e32b00-4602-11eb-8ab5-a9727900ed6e.png) ![image](https://user-images.githubusercontent.com/7213793/103111474-45ceed00-4602-11eb-8ec2-83c0fd7ca9bd.png) </issue> <code> [start of arviz/plots/hdiplot.py] 1 """Plot highest density intervals for regression data.""" 2 import warnings 3 4 import numpy as np 5 from scipy.interpolate import griddata 6 from scipy.signal import savgol_filter 7 from xarray import Dataset 8 9 from ..rcparams import rcParams 10 from ..stats import hdi 11 from .plot_utils import get_plotting_function 12 13 14 def plot_hdi( 15 x, 16 y=None, 17 hdi_prob=None, 18 hdi_data=None, 19 color="C1", 20 circular=False, 21 smooth=True, 22 smooth_kwargs=None, 23 figsize=None, 24 fill_kwargs=None, 25 plot_kwargs=None, 26 hdi_kwargs=None, 27 ax=None, 28 backend=None, 29 backend_kwargs=None, 30 show=None, 31 ): 32 r""" 33 Plot HDI intervals for regression data. 34 35 Parameters 36 ---------- 37 x : array-like 38 Values to plot. 39 y : array-like, optional 40 Values from which to compute the HDI. Assumed shape ``(chain, draw, \*shape)``. 41 Only optional if hdi_data is present. 42 hdi_data : array_like, optional 43 Precomputed HDI values to use. Assumed shape is ``(*x.shape, 2)``. 44 hdi_prob : float, optional 45 Probability for the highest density interval. Defaults to ``stats.hdi_prob`` rcParam. 46 color : str, optional 47 Color used for the limits of the HDI and fill. Should be a valid matplotlib color. 48 circular : bool, optional 49 Whether to compute the HDI taking into account `x` is a circular variable 50 (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables). 
51 smooth : boolean, optional 52 If True the result will be smoothed by first computing a linear interpolation of the data 53 over a regular grid and then applying the Savitzky-Golay filter to the interpolated data. 54 Defaults to True. 55 smooth_kwargs : dict, optional 56 Additional keywords modifying the Savitzky-Golay filter. See 57 :func:`scipy:scipy.signal.savgol_filter` for details. 58 figsize : tuple 59 Figure size. If None it will be defined automatically. 60 fill_kwargs : dict, optional 61 Keywords passed to :meth:`mpl:matplotlib.axes.Axes.fill_between` 62 (use fill_kwargs={'alpha': 0} to disable fill) or to 63 :meth:`bokeh:bokeh.plotting.figure.Figure.patch`. 64 plot_kwargs : dict, optional 65 HDI limits keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.plot` or 66 :meth:`bokeh:bokeh.plotting.figure.Figure.patch`. 67 hdi_kwargs : dict, optional 68 Keyword arguments passed to :func:`~arviz.hdi`. Ignored if ``hdi_data`` is present. 69 ax : axes, optional 70 Matplotlib axes or bokeh figures. 71 backend : {"matplotlib","bokeh"}, optional 72 Select plotting backend. 73 backend_kwargs : bool, optional 74 These are kwargs specific to the backend being used. Passed to ::`` 75 show : bool, optional 76 Call backend show function. 77 78 Returns 79 ------- 80 axes : matplotlib axes or bokeh figures 81 82 See Also 83 -------- 84 hdi : Calculate highest density interval (HDI) of array for given probability. 85 86 Examples 87 -------- 88 Plot HDI interval of simulated regression data using `y` argument: 89 90 .. plot:: 91 :context: close-figs 92 93 >>> import numpy as np 94 >>> import arviz as az 95 >>> x_data = np.random.normal(0, 1, 100) 96 >>> y_data = np.random.normal(2 + x_data * 0.5, 0.5, (2, 50, 100)) 97 >>> az.plot_hdi(x_data, y_data) 98 99 ``plot_hdi`` can also be given precalculated values with the argument ``hdi_data``. This example 100 shows how to use :func:`~arviz.hdi` to precalculate the values and pass these values to 101 ``plot_hdi``. Similarly to an example in ``hdi`` we are using the ``input_core_dims`` 102 argument of :func:`~arviz.wrap_xarray_ufunc` to manually define the dimensions over which 103 to calculate the HDI. 104 105 .. plot:: 106 :context: close-figs 107 108 >>> hdi_data = az.hdi(y_data, input_core_dims=[["draw"]]) 109 >>> ax = az.plot_hdi(x_data, hdi_data=hdi_data[0], color="r", fill_kwargs={"alpha": .2}) 110 >>> az.plot_hdi(x_data, hdi_data=hdi_data[1], color="k", ax=ax, fill_kwargs={"alpha": .2}) 111 112 """ 113 if hdi_kwargs is None: 114 hdi_kwargs = {} 115 116 x = np.asarray(x) 117 x_shape = x.shape 118 119 if y is None and hdi_data is None: 120 raise ValueError("One of {y, hdi_data} is required") 121 if hdi_data is not None and y is not None: 122 warnings.warn("Both y and hdi_data arguments present, ignoring y") 123 elif hdi_data is not None: 124 hdi_prob = ( 125 hdi_data.hdi.attrs.get("hdi_prob", np.nan) if hasattr(hdi_data, "hdi") else np.nan 126 ) 127 if isinstance(hdi_data, Dataset): 128 data_vars = list(hdi_data.data_vars) 129 if len(data_vars) != 1: 130 raise ValueError( 131 "Found several variables in hdi_data. Only single variable Datasets are " 132 "supported." 
133 ) 134 hdi_data = hdi_data[data_vars[0]] 135 else: 136 y = np.asarray(y) 137 if hdi_prob is None: 138 hdi_prob = rcParams["stats.hdi_prob"] 139 else: 140 if not 1 >= hdi_prob > 0: 141 raise ValueError("The value of hdi_prob should be in the interval (0, 1]") 142 hdi_data = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False, **hdi_kwargs) 143 144 hdi_shape = hdi_data.shape 145 if hdi_shape[:-1] != x_shape: 146 msg = ( 147 "Dimension mismatch for x: {} and hdi: {}. Check the dimensions of y and" 148 "hdi_kwargs to make sure they are compatible" 149 ) 150 raise TypeError(msg.format(x_shape, hdi_shape)) 151 152 if smooth: 153 if smooth_kwargs is None: 154 smooth_kwargs = {} 155 smooth_kwargs.setdefault("window_length", 55) 156 smooth_kwargs.setdefault("polyorder", 2) 157 x_data = np.linspace(x.min(), x.max(), 200) 158 x_data[0] = (x_data[0] + x_data[1]) / 2 159 hdi_interp = griddata(x, hdi_data, x_data) 160 y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs) 161 else: 162 idx = np.argsort(x) 163 x_data = x[idx] 164 y_data = hdi_data[idx] 165 166 hdiplot_kwargs = dict( 167 ax=ax, 168 x_data=x_data, 169 y_data=y_data, 170 color=color, 171 figsize=figsize, 172 plot_kwargs=plot_kwargs, 173 fill_kwargs=fill_kwargs, 174 backend_kwargs=backend_kwargs, 175 show=show, 176 ) 177 178 if backend is None: 179 backend = rcParams["plot.backend"] 180 backend = backend.lower() 181 182 plot = get_plotting_function("plot_hdi", "hdiplot", backend) 183 ax = plot(**hdiplot_kwargs) 184 return ax 185 [end of arviz/plots/hdiplot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py --- a/arviz/plots/hdiplot.py +++ b/arviz/plots/hdiplot.py @@ -109,6 +109,19 @@ >>> ax = az.plot_hdi(x_data, hdi_data=hdi_data[0], color="r", fill_kwargs={"alpha": .2}) >>> az.plot_hdi(x_data, hdi_data=hdi_data[1], color="k", ax=ax, fill_kwargs={"alpha": .2}) + ``plot_hdi`` can also be used with Inference Data objects. Here we use the posterior predictive + to plot the HDI interval. + + .. plot:: + :context: close-figs + + >>> X = np.random.normal(0,1,100) + >>> Y = np.random.normal(2 + X * 0.5, 0.5, (10,100)) + >>> idata = az.from_dict(posterior={"y": Y}, constant_data={"x":X}) + >>> x_data = idata.constant_data.x + >>> y_data = idata.posterior.y + >>> az.plot_hdi(x_data, y_data) + """ if hdi_kwargs is None: hdi_kwargs = {}
{"golden_diff": "diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py\n--- a/arviz/plots/hdiplot.py\n+++ b/arviz/plots/hdiplot.py\n@@ -109,6 +109,19 @@\n >>> ax = az.plot_hdi(x_data, hdi_data=hdi_data[0], color=\"r\", fill_kwargs={\"alpha\": .2})\n >>> az.plot_hdi(x_data, hdi_data=hdi_data[1], color=\"k\", ax=ax, fill_kwargs={\"alpha\": .2})\n \n+ ``plot_hdi`` can also be used with Inference Data objects. Here we use the posterior predictive\n+ to plot the HDI interval.\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> X = np.random.normal(0,1,100)\n+ >>> Y = np.random.normal(2 + X * 0.5, 0.5, (10,100))\n+ >>> idata = az.from_dict(posterior={\"y\": Y}, constant_data={\"x\":X})\n+ >>> x_data = idata.constant_data.x\n+ >>> y_data = idata.posterior.y\n+ >>> az.plot_hdi(x_data, y_data)\n+\n \"\"\"\n if hdi_kwargs is None:\n hdi_kwargs = {}\n", "issue": "Add example to az.plot_hdi that shows how to plot hdi from InferenceData posterior or posterior predictive\n## Tell us about it\r\nThe `az.plot_hdi` docs only show how to plot the hdi from synthetic data. What would be nice is if it also shows users how to plot the hdi from an `az.InferenceData` as thats the standard data representation most people will be working with.\r\n\r\n![image](https://user-images.githubusercontent.com/7213793/103111300-5d0cdb00-4600-11eb-973e-3c710b4a63cc.png)\r\n\r\n## Thoughts on implementation\r\nUse one of the precomputed datasets that has a posterior predictive group and create a plot.\r\n\r\nHere's an example of loading a dataset with posterior predictive group\r\nhttps://arviz-devs.github.io/arviz/api/generated/arviz.plot_ppc.html\r\n\r\nHere's an (unrelated) example where PyMC3 generates a trace and posterior predictive group and its used with `az.plot_hdi`\r\n\r\n```\r\nwith pm.Model() as model_linear:\r\n \u03b2 = pm.Normal('\u03b2', sd=10, shape=2)\r\n\r\n \u03bc = pm.Deterministic('\u03bc', pm.math.dot(babies[[\"Intercept\", \"Month\"]], \u03b2))\r\n \u03f5 = pm.HalfNormal('\u03f5', sd=10)\r\n\r\n length = pm.Normal('length', mu=\u03bc, sd=\u03f5, observed=babies.Length)\r\n\r\n linear_trace = pm.sample(2000, tune=4000)\r\n linear_ppc = pm.sample_posterior_predictive(inf_data_linear)\r\n inf_data_linear = az.from_pymc3(trace=linear_trace, posterior_predictive= linear_ppc)\r\n\r\nfig, ax = plt.subplots()\r\n\r\nax.set_ylabel(\"Length\")\r\nax.set_xlabel(\"Month\");\r\n\r\n\u03bc_m = inf_data_linear.posterior[\"\u03bc\"].values.reshape(-1, babies.Length.shape[0]).mean(axis=0)\r\n\r\nax.plot(babies.Month, \u03bc_m, c='C4')\r\naz.plot_hdi(babies.Month, inf_data_linear.posterior_predictive[\"length\"], hdi_prob=.94, ax=ax)\r\n\r\nax.plot(babies.Month, babies.Length, 'C0.', alpha=0.1)\r\n\r\nplt.savefig('img/Baby_Length_Linear_Fit.png', dpi=300)\r\n\r\n```\r\n![image](https://user-images.githubusercontent.com/7213793/103111469-39e32b00-4602-11eb-8ab5-a9727900ed6e.png)\r\n![image](https://user-images.githubusercontent.com/7213793/103111474-45ceed00-4602-11eb-8ec2-83c0fd7ca9bd.png)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Plot highest density intervals for regression data.\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\nfrom xarray import Dataset\n\nfrom ..rcparams import rcParams\nfrom ..stats import hdi\nfrom .plot_utils import get_plotting_function\n\n\ndef plot_hdi(\n x,\n y=None,\n hdi_prob=None,\n hdi_data=None,\n color=\"C1\",\n circular=False,\n smooth=True,\n smooth_kwargs=None,\n figsize=None,\n 
fill_kwargs=None,\n plot_kwargs=None,\n hdi_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n r\"\"\"\n Plot HDI intervals for regression data.\n\n Parameters\n ----------\n x : array-like\n Values to plot.\n y : array-like, optional\n Values from which to compute the HDI. Assumed shape ``(chain, draw, \\*shape)``.\n Only optional if hdi_data is present.\n hdi_data : array_like, optional\n Precomputed HDI values to use. Assumed shape is ``(*x.shape, 2)``.\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to ``stats.hdi_prob`` rcParam.\n color : str, optional\n Color used for the limits of the HDI and fill. Should be a valid matplotlib color.\n circular : bool, optional\n Whether to compute the HDI taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n smooth : boolean, optional\n If True the result will be smoothed by first computing a linear interpolation of the data\n over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. See\n :func:`scipy:scipy.signal.savgol_filter` for details.\n figsize : tuple\n Figure size. If None it will be defined automatically.\n fill_kwargs : dict, optional\n Keywords passed to :meth:`mpl:matplotlib.axes.Axes.fill_between`\n (use fill_kwargs={'alpha': 0} to disable fill) or to\n :meth:`bokeh:bokeh.plotting.figure.Figure.patch`.\n plot_kwargs : dict, optional\n HDI limits keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.plot` or\n :meth:`bokeh:bokeh.plotting.figure.Figure.patch`.\n hdi_kwargs : dict, optional\n Keyword arguments passed to :func:`~arviz.hdi`. Ignored if ``hdi_data`` is present.\n ax : axes, optional\n Matplotlib axes or bokeh figures.\n backend : {\"matplotlib\",\"bokeh\"}, optional\n Select plotting backend.\n backend_kwargs : bool, optional\n These are kwargs specific to the backend being used. Passed to ::``\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n See Also\n --------\n hdi : Calculate highest density interval (HDI) of array for given probability.\n\n Examples\n --------\n Plot HDI interval of simulated regression data using `y` argument:\n\n .. plot::\n :context: close-figs\n\n >>> import numpy as np\n >>> import arviz as az\n >>> x_data = np.random.normal(0, 1, 100)\n >>> y_data = np.random.normal(2 + x_data * 0.5, 0.5, (2, 50, 100))\n >>> az.plot_hdi(x_data, y_data)\n\n ``plot_hdi`` can also be given precalculated values with the argument ``hdi_data``. This example\n shows how to use :func:`~arviz.hdi` to precalculate the values and pass these values to\n ``plot_hdi``. Similarly to an example in ``hdi`` we are using the ``input_core_dims``\n argument of :func:`~arviz.wrap_xarray_ufunc` to manually define the dimensions over which\n to calculate the HDI.\n\n .. 
plot::\n :context: close-figs\n\n >>> hdi_data = az.hdi(y_data, input_core_dims=[[\"draw\"]])\n >>> ax = az.plot_hdi(x_data, hdi_data=hdi_data[0], color=\"r\", fill_kwargs={\"alpha\": .2})\n >>> az.plot_hdi(x_data, hdi_data=hdi_data[1], color=\"k\", ax=ax, fill_kwargs={\"alpha\": .2})\n\n \"\"\"\n if hdi_kwargs is None:\n hdi_kwargs = {}\n\n x = np.asarray(x)\n x_shape = x.shape\n\n if y is None and hdi_data is None:\n raise ValueError(\"One of {y, hdi_data} is required\")\n if hdi_data is not None and y is not None:\n warnings.warn(\"Both y and hdi_data arguments present, ignoring y\")\n elif hdi_data is not None:\n hdi_prob = (\n hdi_data.hdi.attrs.get(\"hdi_prob\", np.nan) if hasattr(hdi_data, \"hdi\") else np.nan\n )\n if isinstance(hdi_data, Dataset):\n data_vars = list(hdi_data.data_vars)\n if len(data_vars) != 1:\n raise ValueError(\n \"Found several variables in hdi_data. Only single variable Datasets are \"\n \"supported.\"\n )\n hdi_data = hdi_data[data_vars[0]]\n else:\n y = np.asarray(y)\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n hdi_data = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False, **hdi_kwargs)\n\n hdi_shape = hdi_data.shape\n if hdi_shape[:-1] != x_shape:\n msg = (\n \"Dimension mismatch for x: {} and hdi: {}. Check the dimensions of y and\"\n \"hdi_kwargs to make sure they are compatible\"\n )\n raise TypeError(msg.format(x_shape, hdi_shape))\n\n if smooth:\n if smooth_kwargs is None:\n smooth_kwargs = {}\n smooth_kwargs.setdefault(\"window_length\", 55)\n smooth_kwargs.setdefault(\"polyorder\", 2)\n x_data = np.linspace(x.min(), x.max(), 200)\n x_data[0] = (x_data[0] + x_data[1]) / 2\n hdi_interp = griddata(x, hdi_data, x_data)\n y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)\n else:\n idx = np.argsort(x)\n x_data = x[idx]\n y_data = hdi_data[idx]\n\n hdiplot_kwargs = dict(\n ax=ax,\n x_data=x_data,\n y_data=y_data,\n color=color,\n figsize=figsize,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n plot = get_plotting_function(\"plot_hdi\", \"hdiplot\", backend)\n ax = plot(**hdiplot_kwargs)\n return ax\n", "path": "arviz/plots/hdiplot.py"}]}
3,297
301
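The `plot_hdi` source included in the row above smooths the HDI band by first interpolating the bounds onto a regular 200-point grid and then running a Savitzky-Golay filter over them. A minimal standalone sketch of just that smoothing step is shown below; the random sample data and the `smooth_hdi_band` helper name are illustrative assumptions, not part of arviz.

```python
import numpy as np
from scipy.interpolate import griddata
from scipy.signal import savgol_filter

def smooth_hdi_band(x, hdi_data, window_length=55, polyorder=2):
    # Interpolate the (n, 2) lower/upper HDI bounds onto a regular grid,
    # then smooth each bound column, mirroring the snippet's smooth=True path.
    x_grid = np.linspace(x.min(), x.max(), 200)
    x_grid[0] = (x_grid[0] + x_grid[1]) / 2  # nudge off the exact left edge
    hdi_interp = griddata(x, hdi_data, x_grid)
    return x_grid, savgol_filter(hdi_interp, window_length, polyorder, axis=0)

# Hypothetical inputs: 100 x-values with a crude +/-1 band around each.
x = np.random.normal(0, 1, 100)
band = np.column_stack([x - 1.0, x + 1.0])
x_grid, smoothed = smooth_hdi_band(x, band)
```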
gh_patches_debug_16215
rasdani/github-patches
git_diff
sktime__sktime-1705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] transformers tutorial There should be a notebook that explains the different transformer scitypes, and how transformers work in `sktime`. </issue> <code> [start of sktime/datatypes/_series/_examples.py] 1 # -*- coding: utf-8 -*- 2 """Example generation for testing. 3 4 Exports dict of examples, useful for testing as fixtures. 5 6 example_dict: dict indexed by triple 7 1st element = mtype - str 8 2nd element = considered as this scitype - str 9 3rd element = int - index of example 10 elements are data objects, considered examples for the mtype 11 all examples with same index are considered "same" on scitype content 12 if None, indicates that representation is not possible 13 14 example_lossy: dict of bool indexed by triple 15 1st element = mtype - str 16 2nd element = considered as this scitype - str 17 3rd element = int - index of example 18 elements are bool, indicate whether representation has information removed 19 all examples with same index are considered "same" on scitype content 20 21 example_metadata: dict of metadata dict, indexed by pair 22 1st element = considered as this scitype - str 23 2nd element = int - index of example 24 (there is no "mtype" element, as properties are equal for all mtypes) 25 elements are metadata dict, as returned by check_is_mtype 26 used as expected return of check_is_mtype in tests 27 28 overall, conversions from non-lossy representations to any other ones 29 should yield the element exactly, identidally (given same index) 30 """ 31 32 import numpy as np 33 import pandas as pd 34 35 from sktime.utils.validation._dependencies import _check_soft_dependencies 36 37 example_dict = dict() 38 example_dict_lossy = dict() 39 example_dict_metadata = dict() 40 41 ### 42 # example 0: univariate 43 44 s = pd.Series([1, 4, 0.5, -3], dtype=np.float64, name="a") 45 46 example_dict[("pd.Series", "Series", 0)] = s 47 example_dict_lossy[("pd.Series", "Series", 0)] = False 48 49 df = pd.DataFrame({"a": [1, 4, 0.5, -3]}) 50 51 example_dict[("pd.DataFrame", "Series", 0)] = df 52 example_dict_lossy[("pd.DataFrame", "Series", 0)] = False 53 54 arr = np.array([[1], [4], [0.5], [-3]]) 55 56 example_dict[("np.ndarray", "Series", 0)] = arr 57 example_dict_lossy[("np.ndarray", "Series", 0)] = True 58 59 if _check_soft_dependencies("xarray", severity="none"): 60 import xarray as xr 61 62 da = xr.DataArray( 63 [[1], [4], [0.5], [-3]], 64 coords=[[0, 1, 2, 3], ["a"]], 65 ) 66 67 example_dict[("xr.DataArray", "Series", 0)] = da 68 example_dict_lossy[("xr.DataArray", "Series", 0)] = False 69 70 71 example_dict_metadata[("Series", 0)] = { 72 "is_univariate": True, 73 "is_equally_spaced": True, 74 "is_empty": False, 75 "has_nans": False, 76 } 77 78 ### 79 # example 1: multivariate 80 81 example_dict[("pd.Series", "Series", 1)] = None 82 example_dict_lossy[("pd.Series", "Series", 1)] = None 83 84 df = pd.DataFrame({"a": [1, 4, 0.5, -3], "b": [3, 7, 2, -3 / 7]}) 85 86 example_dict[("pd.DataFrame", "Series", 1)] = df 87 example_dict_lossy[("pd.DataFrame", "Series", 1)] = False 88 89 arr = np.array([[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]]) 90 91 example_dict[("np.ndarray", "Series", 1)] = arr 92 example_dict_lossy[("np.ndarray", "Series", 1)] = True 93 if _check_soft_dependencies("xarray", severity="none"): 94 import xarray as xr 95 96 da = xr.DataArray( 97 [[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]], 98 coords=[[0, 1, 2, 3], ["a", "b"]], 99 ) 100 101 
example_dict[("xr.DataArray", "Series", 1)] = da 102 example_dict_lossy[("xr.DataArray", "Series", 1)] = False 103 104 example_dict_metadata[("Series", 1)] = { 105 "is_univariate": False, 106 "is_equally_spaced": True, 107 "is_empty": False, 108 "has_nans": False, 109 } 110 111 ### 112 # example 2: multivariate, positive 113 114 example_dict[("pd.Series", "Series", 2)] = None 115 example_dict_lossy[("pd.Series", "Series", 2)] = None 116 117 df = pd.DataFrame({"a": [1, 4, 0.5, 3], "b": [3, 7, 2, 3 / 7]}) 118 119 example_dict[("pd.DataFrame", "Series", 2)] = df 120 example_dict_lossy[("pd.DataFrame", "Series", 2)] = False 121 122 arr = np.array([[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]]) 123 124 example_dict[("np.ndarray", "Series", 2)] = arr 125 example_dict_lossy[("np.ndarray", "Series", 2)] = True 126 127 if _check_soft_dependencies("xarray", severity="none"): 128 import xarray as xr 129 130 da = xr.DataArray( 131 [[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]], 132 coords=[[0, 1, 2, 3], ["a", "b"]], 133 ) 134 135 example_dict[("xr.DataArray", "Series", 2)] = da 136 example_dict_lossy[("xr.DataArray", "Series", 2)] = False 137 138 139 example_dict_metadata[("Series", 2)] = { 140 "is_univariate": False, 141 "is_equally_spaced": True, 142 "is_empty": False, 143 "has_nans": False, 144 } 145 [end of sktime/datatypes/_series/_examples.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sktime/datatypes/_series/_examples.py b/sktime/datatypes/_series/_examples.py --- a/sktime/datatypes/_series/_examples.py +++ b/sktime/datatypes/_series/_examples.py @@ -142,3 +142,39 @@ "is_empty": False, "has_nans": False, } + +### +# example 3: univariate, positive + +s = pd.Series([1, 4, 0.5, 3], dtype=np.float64, name="a") + +example_dict[("pd.Series", "Series", 3)] = s +example_dict_lossy[("pd.Series", "Series", 3)] = False + +df = pd.DataFrame({"a": [1, 4, 0.5, 3]}) + +example_dict[("pd.DataFrame", "Series", 3)] = df +example_dict_lossy[("pd.DataFrame", "Series", 3)] = False + +arr = np.array([[1], [4], [0.5], [3]]) + +example_dict[("np.ndarray", "Series", 3)] = arr +example_dict_lossy[("np.ndarray", "Series", 3)] = True + +if _check_soft_dependencies("xarray", severity="none"): + import xarray as xr + + da = xr.DataArray( + [[1], [4], [0.5], [3]], + coords=[[0, 1, 2, 3], ["a"]], + ) + + example_dict[("xr.DataArray", "Series", 3)] = da + example_dict_lossy[("xr.DataArray", "Series", 3)] = False + +example_dict_metadata[("Series", 3)] = { + "is_univariate": True, + "is_equally_spaced": True, + "is_empty": False, + "has_nans": False, +}
{"golden_diff": "diff --git a/sktime/datatypes/_series/_examples.py b/sktime/datatypes/_series/_examples.py\n--- a/sktime/datatypes/_series/_examples.py\n+++ b/sktime/datatypes/_series/_examples.py\n@@ -142,3 +142,39 @@\n \"is_empty\": False,\n \"has_nans\": False,\n }\n+\n+###\n+# example 3: univariate, positive\n+\n+s = pd.Series([1, 4, 0.5, 3], dtype=np.float64, name=\"a\")\n+\n+example_dict[(\"pd.Series\", \"Series\", 3)] = s\n+example_dict_lossy[(\"pd.Series\", \"Series\", 3)] = False\n+\n+df = pd.DataFrame({\"a\": [1, 4, 0.5, 3]})\n+\n+example_dict[(\"pd.DataFrame\", \"Series\", 3)] = df\n+example_dict_lossy[(\"pd.DataFrame\", \"Series\", 3)] = False\n+\n+arr = np.array([[1], [4], [0.5], [3]])\n+\n+example_dict[(\"np.ndarray\", \"Series\", 3)] = arr\n+example_dict_lossy[(\"np.ndarray\", \"Series\", 3)] = True\n+\n+if _check_soft_dependencies(\"xarray\", severity=\"none\"):\n+ import xarray as xr\n+\n+ da = xr.DataArray(\n+ [[1], [4], [0.5], [3]],\n+ coords=[[0, 1, 2, 3], [\"a\"]],\n+ )\n+\n+ example_dict[(\"xr.DataArray\", \"Series\", 3)] = da\n+ example_dict_lossy[(\"xr.DataArray\", \"Series\", 3)] = False\n+\n+example_dict_metadata[(\"Series\", 3)] = {\n+ \"is_univariate\": True,\n+ \"is_equally_spaced\": True,\n+ \"is_empty\": False,\n+ \"has_nans\": False,\n+}\n", "issue": "[DOC] transformers tutorial\nThere should be a notebook that explains the different transformer scitypes, and how transformers work in `sktime`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\nexample_metadata: dict of metadata dict, indexed by pair\n 1st element = considered as this scitype - str\n 2nd element = int - index of example\n (there is no \"mtype\" element, as properties are equal for all mtypes)\nelements are metadata dict, as returned by check_is_mtype\n used as expected return of check_is_mtype in tests\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\nexample_dict = dict()\nexample_dict_lossy = dict()\nexample_dict_metadata = dict()\n\n###\n# example 0: univariate\n\ns = pd.Series([1, 4, 0.5, -3], dtype=np.float64, name=\"a\")\n\nexample_dict[(\"pd.Series\", \"Series\", 0)] = s\nexample_dict_lossy[(\"pd.Series\", \"Series\", 0)] = False\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 0)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 0)] = False\n\narr = np.array([[1], [4], [0.5], [-3]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 0)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 0)] = True\n\nif _check_soft_dependencies(\"xarray\", 
severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1], [4], [0.5], [-3]],\n coords=[[0, 1, 2, 3], [\"a\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 0)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 0)] = False\n\n\nexample_dict_metadata[(\"Series\", 0)] = {\n \"is_univariate\": True,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 1: multivariate\n\nexample_dict[(\"pd.Series\", \"Series\", 1)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 1)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, -3], \"b\": [3, 7, 2, -3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 1)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 1)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 1)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 1)] = True\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [-3, -3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 1)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 1)] = False\n\nexample_dict_metadata[(\"Series\", 1)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n\n###\n# example 2: multivariate, positive\n\nexample_dict[(\"pd.Series\", \"Series\", 2)] = None\nexample_dict_lossy[(\"pd.Series\", \"Series\", 2)] = None\n\ndf = pd.DataFrame({\"a\": [1, 4, 0.5, 3], \"b\": [3, 7, 2, 3 / 7]})\n\nexample_dict[(\"pd.DataFrame\", \"Series\", 2)] = df\nexample_dict_lossy[(\"pd.DataFrame\", \"Series\", 2)] = False\n\narr = np.array([[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]])\n\nexample_dict[(\"np.ndarray\", \"Series\", 2)] = arr\nexample_dict_lossy[(\"np.ndarray\", \"Series\", 2)] = True\n\nif _check_soft_dependencies(\"xarray\", severity=\"none\"):\n import xarray as xr\n\n da = xr.DataArray(\n [[1, 3], [4, 7], [0.5, 2], [3, 3 / 7]],\n coords=[[0, 1, 2, 3], [\"a\", \"b\"]],\n )\n\n example_dict[(\"xr.DataArray\", \"Series\", 2)] = da\n example_dict_lossy[(\"xr.DataArray\", \"Series\", 2)] = False\n\n\nexample_dict_metadata[(\"Series\", 2)] = {\n \"is_univariate\": False,\n \"is_equally_spaced\": True,\n \"is_empty\": False,\n \"has_nans\": False,\n}\n", "path": "sktime/datatypes/_series/_examples.py"}]}
2,300
442
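The `_examples.py` module in the row above registers every fixture three times, keyed by `(mtype, scitype, index)`: the object itself in `example_dict`, a lossiness flag in `example_dict_lossy`, and per-index metadata in `example_dict_metadata`. A hedged sketch of that registration pattern, collapsed into one helper and stripped of the soft-dependency (xarray) branch, is shown below; `register_series_example` is an illustrative name, not an sktime function.

```python
import numpy as np
import pandas as pd

example_dict, example_dict_lossy, example_dict_metadata = {}, {}, {}

def register_series_example(index, series, metadata):
    # Same content registered under several mtypes; the np.ndarray form is
    # marked lossy because converting to it drops the column name.
    example_dict[("pd.Series", "Series", index)] = series
    example_dict_lossy[("pd.Series", "Series", index)] = False

    example_dict[("pd.DataFrame", "Series", index)] = series.to_frame()
    example_dict_lossy[("pd.DataFrame", "Series", index)] = False

    example_dict[("np.ndarray", "Series", index)] = series.to_numpy().reshape(-1, 1)
    example_dict_lossy[("np.ndarray", "Series", index)] = True

    example_dict_metadata[("Series", index)] = metadata

register_series_example(
    3,
    pd.Series([1, 4, 0.5, 3], dtype=np.float64, name="a"),
    {"is_univariate": True, "is_equally_spaced": True,
     "is_empty": False, "has_nans": False},
)
```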
gh_patches_debug_8067
rasdani/github-patches
git_diff
conda__conda-7525
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> should_bypass_proxies still an issue in 4.5.7 https://github.com/conda/conda/issues/7506#issuecomment-403811279 </issue> <code> [start of conda/gateways/connection/__init__.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 from functools import partial 4 5 def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy): 6 # Monkey patch requests, per https://github.com/requests/requests/pull/4723 7 if url.startswith("file://"): 8 return True 9 try: 10 return should_bypass_proxies_func(url, no_proxy) 11 except TypeError: 12 # For versions of requests we shouldn't have to deal with. 13 # https://github.com/conda/conda/issues/7503 14 # https://github.com/conda/conda/issues/7506 15 return should_bypass_proxies_func(url) 16 17 18 try: 19 from requests import ConnectionError, HTTPError, Session 20 from requests.adapters import BaseAdapter, HTTPAdapter 21 from requests.auth import AuthBase, _basic_auth_str 22 from requests.cookies import extract_cookies_to_jar 23 from requests.exceptions import InvalidSchema, SSLError 24 from requests.hooks import dispatch_hook 25 from requests.models import Response 26 from requests.packages.urllib3.exceptions import InsecureRequestWarning 27 from requests.structures import CaseInsensitiveDict 28 from requests.utils import get_auth_from_url, get_netrc_auth 29 30 # monkeypatch requests 31 from requests.utils import should_bypass_proxies 32 import requests.utils 33 requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched, 34 should_bypass_proxies) 35 except ImportError: # pragma: no cover 36 from pip._vendor.requests import ConnectionError, HTTPError, Session 37 from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter 38 from pip._vendor.requests.auth import AuthBase, _basic_auth_str 39 from pip._vendor.requests.cookies import extract_cookies_to_jar 40 from pip._vendor.requests.exceptions import InvalidSchema, SSLError 41 from pip._vendor.requests.hooks import dispatch_hook 42 from pip._vendor.requests.models import Response 43 from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning 44 from pip._vendor.requests.structures import CaseInsensitiveDict 45 from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth 46 47 # monkeypatch requests 48 from pip._vendor.requests.utils import should_bypass_proxies 49 import pip._vendor.requests.utils 50 pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched, 51 should_bypass_proxies) 52 53 54 dispatch_hook = dispatch_hook 55 BaseAdapter = BaseAdapter 56 Response = Response 57 CaseInsensitiveDict = CaseInsensitiveDict 58 Session = Session 59 HTTPAdapter = HTTPAdapter 60 AuthBase = AuthBase 61 _basic_auth_str = _basic_auth_str 62 extract_cookies_to_jar = extract_cookies_to_jar 63 get_auth_from_url = get_auth_from_url 64 get_netrc_auth = get_netrc_auth 65 ConnectionError = ConnectionError 66 HTTPError = HTTPError 67 InvalidSchema = InvalidSchema 68 SSLError = SSLError 69 InsecureRequestWarning = InsecureRequestWarning 70 [end of conda/gateways/connection/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py --- a/conda/gateways/connection/__init__.py +++ b/conda/gateways/connection/__init__.py @@ -2,7 +2,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals from functools import partial -def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy): +def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None): # Monkey patch requests, per https://github.com/requests/requests/pull/4723 if url.startswith("file://"): return True
{"golden_diff": "diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py\n--- a/conda/gateways/connection/__init__.py\n+++ b/conda/gateways/connection/__init__.py\n@@ -2,7 +2,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n from functools import partial\n \n-def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n+def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n", "issue": "should_bypass_proxies still an issue in 4.5.7\nhttps://github.com/conda/conda/issues/7506#issuecomment-403811279\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom functools import partial\n\ndef should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n try:\n return should_bypass_proxies_func(url, no_proxy)\n except TypeError:\n # For versions of requests we shouldn't have to deal with.\n # https://github.com/conda/conda/issues/7503\n # https://github.com/conda/conda/issues/7506\n return should_bypass_proxies_func(url)\n\n\ntry:\n from requests import ConnectionError, HTTPError, Session\n from requests.adapters import BaseAdapter, HTTPAdapter\n from requests.auth import AuthBase, _basic_auth_str\n from requests.cookies import extract_cookies_to_jar\n from requests.exceptions import InvalidSchema, SSLError\n from requests.hooks import dispatch_hook\n from requests.models import Response\n from requests.packages.urllib3.exceptions import InsecureRequestWarning\n from requests.structures import CaseInsensitiveDict\n from requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from requests.utils import should_bypass_proxies\n import requests.utils\n requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\nexcept ImportError: # pragma: no cover\n from pip._vendor.requests import ConnectionError, HTTPError, Session\n from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter\n from pip._vendor.requests.auth import AuthBase, _basic_auth_str\n from pip._vendor.requests.cookies import extract_cookies_to_jar\n from pip._vendor.requests.exceptions import InvalidSchema, SSLError\n from pip._vendor.requests.hooks import dispatch_hook\n from pip._vendor.requests.models import Response\n from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning\n from pip._vendor.requests.structures import CaseInsensitiveDict\n from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from pip._vendor.requests.utils import should_bypass_proxies\n import pip._vendor.requests.utils\n pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\n\n\ndispatch_hook = dispatch_hook\nBaseAdapter = BaseAdapter\nResponse = Response\nCaseInsensitiveDict = CaseInsensitiveDict\nSession = Session\nHTTPAdapter = HTTPAdapter\nAuthBase = AuthBase\n_basic_auth_str = _basic_auth_str\nextract_cookies_to_jar = extract_cookies_to_jar\nget_auth_from_url = get_auth_from_url\nget_netrc_auth = get_netrc_auth\nConnectionError = ConnectionError\nHTTPError = 
HTTPError\nInvalidSchema = InvalidSchema\nSSLError = SSLError\nInsecureRequestWarning = InsecureRequestWarning\n", "path": "conda/gateways/connection/__init__.py"}]}
1,381
162
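The conda fix in this row amounts to making the monkey-patched wrapper tolerant of how many arguments the active requests version passes to `should_bypass_proxies` (older releases pass only `url`, newer ones pass `url, no_proxy`). The golden diff does this with a `no_proxy=None` default; the sketch below expresses the same idea with `*args`/`**kwargs`, which is an assumed alternative rather than what conda shipped, and it presumes requests is installed.

```python
from functools import partial

import requests.utils

def should_bypass_proxies_patched(should_bypass_proxies_func, url, *args, **kwargs):
    # Always bypass proxies for local file:// URLs, then defer to the real
    # implementation with whatever argument list this requests version uses.
    if url.startswith("file://"):
        return True
    return should_bypass_proxies_func(url, *args, **kwargs)

requests.utils.should_bypass_proxies = partial(
    should_bypass_proxies_patched, requests.utils.should_bypass_proxies
)
```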
gh_patches_debug_10005
rasdani/github-patches
git_diff
iterative__dvc-9605
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> diff: unexpected error when rev_a & rev_b point to the same revision ## Description `dvc diff <rev_a> <rev_b>` outputs `ERROR: unexpected error - '<rev_a>'` when <rev_a> and <rev_b> happen to point to the same revision. ### Reproduce There is a Git repo with 4 branches: <img width="400" src="https://github.com/iterative/dvc/assets/12026360/087d85eb-56e0-4f6c-8904-bc0817bceb22"> - *main* & *new-1* point to the same revision - *new-main* & *new-2* point to the same revision Run the following commands: 1. `dvc diff main new-main` 2. `dvc diff main new-1` 3. `dvc diff new-main new-2` ### Expected All the commands should work. However, only the 1st command work and the others fail with unexpected error: ```console root@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff main new-main Added: data/raw/new_dir/f1.txt Deleted: data/raw/new_dir1/new.txt data/rsc/images/f1.txt Modified: data/ files summary: 1 added, 2 deleted root@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff main new-1 ERROR: unexpected error - 'main' Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! root@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff new-main new-2 ERROR: unexpected error - 'new-main' Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! ``` With verbose output: ```console # dvc diff -v new-main new-2 2023-06-14 10:03:55,347 DEBUG: v3.0.0 (pip), CPython 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10 2023-06-14 10:03:55,347 DEBUG: command: /opt/conda/bin/dvc diff -v new-main new-2 2023-06-14 10:03:56,359 ERROR: unexpected error - 'new-main' Traceback (most recent call last): File "/opt/conda/lib/python3.8/site-packages/dvc/cli/__init__.py", line 209, in main ret = cmd.do_run() File "/opt/conda/lib/python3.8/site-packages/dvc/cli/command.py", line 26, in do_run return self.run() File "/opt/conda/lib/python3.8/site-packages/dvc/commands/diff.py", line 123, in run diff = self.repo.diff(self.args.a_rev, self.args.b_rev, self.args.targets) File "/opt/conda/lib/python3.8/site-packages/dvc/repo/__init__.py", line 64, in wrapper return f(repo, *args, **kwargs) File "/opt/conda/lib/python3.8/site-packages/dvc/repo/diff.py", line 159, in diff old = indexes[a_rev] KeyError: 'new-main' 2023-06-14 10:03:56,573 DEBUG: Version info for developers: DVC version: 3.0.0 (pip) ------------------------ Platform: Python 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10 Subprojects: dvc_data = 1.11.0 dvc_objects = 0.23.0 dvc_render = 0.5.3 dvc_task = 0.3.0 scmrepo = 1.0.3 Supports: http (aiohttp = 3.8.4, aiohttp-retry = 2.8.3), https (aiohttp = 3.8.4, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.6.0, boto3 = 1.26.76) Config: Global: /mlsteam/.config/dvc System: /etc/xdg/dvc Cache types: <https://error.dvc.org/no-dvc-cache> Caches: local Remotes: s3 Workspace directory: xfs on /dev/sdb1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/91479b46e9e0a07ae16a57a5b4209cf5 Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! 2023-06-14 10:03:56,577 DEBUG: Analytics is disabled. 
``` ### Environment information **Output of `dvc doctor`:** ```console # dvc doctor DVC version: 3.0.0 (pip) ------------------------ Platform: Python 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10 Subprojects: dvc_data = 1.11.0 dvc_objects = 0.23.0 dvc_render = 0.5.3 dvc_task = 0.3.0 scmrepo = 1.0.3 Supports: http (aiohttp = 3.8.4, aiohttp-retry = 2.8.3), https (aiohttp = 3.8.4, aiohttp-retry = 2.8.3), s3 (s3fs = 2023.6.0, boto3 = 1.26.76) Config: Global: /mlsteam/.config/dvc System: /etc/xdg/dvc Cache types: <https://error.dvc.org/no-dvc-cache> Caches: local Remotes: s3 Workspace directory: xfs on /dev/sdb1 Repo: dvc, git Repo.site_cache_dir: /var/tmp/dvc/repo/91479b46e9e0a07ae16a57a5b4209cf5 ``` </issue> <code> [start of dvc/repo/diff.py] 1 import errno 2 import logging 3 import os 4 from collections import defaultdict 5 from typing import Dict, List, Optional 6 7 from dvc.repo import locked 8 from dvc.ui import ui 9 10 logger = logging.getLogger(__name__) 11 12 13 def _path(entry): 14 if entry and entry.meta and entry.meta.isdir: 15 return os.path.join(*entry.key, "") 16 return os.path.join(*entry.key) 17 18 19 def _hash(entry): 20 if entry and entry.hash_info: 21 return entry.hash_info.value 22 return None 23 24 25 def _diff(old, new, with_missing=False): 26 from dvc_data.index.diff import ADD, DELETE, MODIFY, RENAME 27 from dvc_data.index.diff import diff as idiff 28 29 ret: "Dict[str, List[Dict]]" = { 30 "added": [], 31 "deleted": [], 32 "modified": [], 33 "renamed": [], 34 "not in cache": [], 35 } 36 37 for change in idiff( 38 old, 39 new, 40 with_renames=True, 41 hash_only=True, 42 ): 43 if change.typ == ADD: 44 ret["added"].append( 45 { 46 "path": _path(change.new), 47 "hash": _hash(change.new), 48 } 49 ) 50 elif change.typ == DELETE: 51 ret["deleted"].append( 52 { 53 "path": _path(change.old), 54 "hash": _hash(change.old), 55 } 56 ) 57 elif change.typ == MODIFY: 58 ret["modified"].append( 59 { 60 "path": _path(change.old), 61 "hash": { 62 "old": _hash(change.old), 63 "new": _hash(change.new), 64 }, 65 } 66 ) 67 elif change.typ == RENAME: 68 ret["renamed"].append( 69 { 70 "path": { 71 "old": _path(change.old), 72 "new": _path(change.new), 73 }, 74 "hash": _hash(change.old), 75 } 76 ) 77 78 if ( 79 with_missing 80 and change.old 81 and change.old.hash_info 82 and not old.storage_map.cache_exists(change.old) 83 ): 84 ret["not in cache"].append( 85 { 86 "path": _path(change.old), 87 "hash": _hash(change.old), 88 } 89 ) 90 91 return ret if any(ret.values()) else {} 92 93 94 @locked 95 def diff( 96 self, 97 a_rev: str = "HEAD", 98 b_rev: Optional[str] = None, 99 targets: Optional[List[str]] = None, 100 recursive: bool = False, 101 ): 102 """ 103 By default, it compares the workspace with the last commit's fs. 104 105 This implementation differs from `git diff` since DVC doesn't have 106 the concept of `index`, but it keeps the same interface, thus, 107 `dvc diff` would be the same as `dvc diff HEAD`. 
108 """ 109 if self.scm.no_commits: 110 return {} 111 112 indexes = {} 113 missing_targets = defaultdict(set) 114 with_missing = False 115 if not b_rev: 116 b_rev = "workspace" 117 with_missing = True 118 119 for rev in self.brancher(revs=[a_rev, b_rev]): 120 if rev == "workspace" and b_rev != "workspace": 121 # brancher always returns workspace, but we only need to compute 122 # workspace paths/checksums if b_rev was None 123 continue 124 125 def onerror(target, _exc): 126 # pylint: disable-next=cell-var-from-loop 127 missing_targets[rev].add(target) # noqa: B023 128 129 view = self.index.targets_view( 130 targets, 131 onerror=onerror, 132 recursive=recursive, 133 ) 134 135 if rev == "workspace": 136 from .index import build_data_index 137 138 with ui.status("Building workspace index"): 139 data = build_data_index( 140 view, 141 self.root_dir, 142 self.fs, 143 compute_hash=True, 144 ) 145 else: 146 data = view.data["repo"] 147 148 assert rev not in indexes 149 indexes[rev] = data 150 151 if targets: 152 old_missing = missing_targets.get(a_rev, set()) 153 new_missing = missing_targets.get(b_rev, set()) 154 155 # check for overlapping missing targets between a_rev and b_rev 156 for target in old_missing & new_missing: 157 raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), target) 158 159 old = indexes[a_rev] 160 new = indexes[b_rev] 161 162 with ui.status("Calculating diff"): 163 return _diff(old, new, with_missing=with_missing) 164 [end of dvc/repo/diff.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py --- a/dvc/repo/diff.py +++ b/dvc/repo/diff.py @@ -156,8 +156,13 @@ for target in old_missing & new_missing: raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), target) - old = indexes[a_rev] - new = indexes[b_rev] + if len(indexes.keys()) == 1: + # both a_rev and b_rev point to the same sha, nothing to compare + old = None + new = None + else: + old = indexes[a_rev] + new = indexes[b_rev] with ui.status("Calculating diff"): return _diff(old, new, with_missing=with_missing)
{"golden_diff": "diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py\n--- a/dvc/repo/diff.py\n+++ b/dvc/repo/diff.py\n@@ -156,8 +156,13 @@\n for target in old_missing & new_missing:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), target)\n \n- old = indexes[a_rev]\n- new = indexes[b_rev]\n+ if len(indexes.keys()) == 1:\n+ # both a_rev and b_rev point to the same sha, nothing to compare\n+ old = None\n+ new = None\n+ else:\n+ old = indexes[a_rev]\n+ new = indexes[b_rev]\n \n with ui.status(\"Calculating diff\"):\n return _diff(old, new, with_missing=with_missing)\n", "issue": "diff: unexpected error when rev_a & rev_b point to the same revision\n## Description\r\n\r\n`dvc diff <rev_a> <rev_b>` outputs `ERROR: unexpected error - '<rev_a>'` when <rev_a> and <rev_b> happen to point to the same revision.\r\n\r\n### Reproduce\r\n\r\nThere is a Git repo with 4 branches:\r\n<img width=\"400\" src=\"https://github.com/iterative/dvc/assets/12026360/087d85eb-56e0-4f6c-8904-bc0817bceb22\">\r\n- *main* & *new-1* point to the same revision\r\n- *new-main* & *new-2* point to the same revision\r\n\r\nRun the following commands:\r\n\r\n1. `dvc diff main new-main`\r\n2. `dvc diff main new-1`\r\n3. `dvc diff new-main new-2`\r\n\r\n### Expected\r\n\r\nAll the commands should work.\r\n\r\nHowever, only the 1st command work and the others fail with unexpected error:\r\n\r\n```console\r\nroot@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff main new-main\r\nAdded:\r\n data/raw/new_dir/f1.txt\r\n\r\nDeleted:\r\n data/raw/new_dir1/new.txt\r\n data/rsc/images/f1.txt\r\n\r\nModified:\r\n data/\r\n\r\nfiles summary: 1 added, 2 deleted\r\nroot@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff main new-1\r\nERROR: unexpected error - 'main' \r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\nroot@dc53a8a4f5c3:~/data/my-ds/VC-Project-Dev# dvc diff new-main new-2\r\nERROR: unexpected error - 'new-main' \r\n\r\nHaving any troubles? 
Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\r\n\r\nWith verbose output:\r\n\r\n```console\r\n# dvc diff -v new-main new-2\r\n2023-06-14 10:03:55,347 DEBUG: v3.0.0 (pip), CPython 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10\r\n2023-06-14 10:03:55,347 DEBUG: command: /opt/conda/bin/dvc diff -v new-main new-2\r\n2023-06-14 10:03:56,359 ERROR: unexpected error - 'new-main' \r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.8/site-packages/dvc/cli/__init__.py\", line 209, in main\r\n ret = cmd.do_run()\r\n File \"/opt/conda/lib/python3.8/site-packages/dvc/cli/command.py\", line 26, in do_run\r\n return self.run()\r\n File \"/opt/conda/lib/python3.8/site-packages/dvc/commands/diff.py\", line 123, in run\r\n diff = self.repo.diff(self.args.a_rev, self.args.b_rev, self.args.targets)\r\n File \"/opt/conda/lib/python3.8/site-packages/dvc/repo/__init__.py\", line 64, in wrapper\r\n return f(repo, *args, **kwargs)\r\n File \"/opt/conda/lib/python3.8/site-packages/dvc/repo/diff.py\", line 159, in diff\r\n old = indexes[a_rev]\r\nKeyError: 'new-main'\r\n\r\n2023-06-14 10:03:56,573 DEBUG: Version info for developers:\r\nDVC version: 3.0.0 (pip)\r\n------------------------\r\nPlatform: Python 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10\r\nSubprojects:\r\n dvc_data = 1.11.0\r\n dvc_objects = 0.23.0\r\n dvc_render = 0.5.3\r\n dvc_task = 0.3.0\r\n scmrepo = 1.0.3\r\nSupports:\r\n http (aiohttp = 3.8.4, aiohttp-retry = 2.8.3),\r\n https (aiohttp = 3.8.4, aiohttp-retry = 2.8.3),\r\n s3 (s3fs = 2023.6.0, boto3 = 1.26.76)\r\nConfig:\r\n Global: /mlsteam/.config/dvc\r\n System: /etc/xdg/dvc\r\nCache types: <https://error.dvc.org/no-dvc-cache>\r\nCaches: local\r\nRemotes: s3\r\nWorkspace directory: xfs on /dev/sdb1\r\nRepo: dvc, git\r\nRepo.site_cache_dir: /var/tmp/dvc/repo/91479b46e9e0a07ae16a57a5b4209cf5\r\n\r\nHaving any troubles? 
Hit us up at https://dvc.org/support, we are always happy to help!\r\n2023-06-14 10:03:56,577 DEBUG: Analytics is disabled.\r\n```\r\n\r\n### Environment information\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n# dvc doctor\r\nDVC version: 3.0.0 (pip)\r\n------------------------\r\nPlatform: Python 3.8.12 on Linux-5.4.0-84-generic-x86_64-with-glibc2.10\r\nSubprojects:\r\n dvc_data = 1.11.0\r\n dvc_objects = 0.23.0\r\n dvc_render = 0.5.3\r\n dvc_task = 0.3.0\r\n scmrepo = 1.0.3\r\nSupports:\r\n http (aiohttp = 3.8.4, aiohttp-retry = 2.8.3),\r\n https (aiohttp = 3.8.4, aiohttp-retry = 2.8.3),\r\n s3 (s3fs = 2023.6.0, boto3 = 1.26.76)\r\nConfig:\r\n Global: /mlsteam/.config/dvc\r\n System: /etc/xdg/dvc\r\nCache types: <https://error.dvc.org/no-dvc-cache>\r\nCaches: local\r\nRemotes: s3\r\nWorkspace directory: xfs on /dev/sdb1\r\nRepo: dvc, git\r\nRepo.site_cache_dir: /var/tmp/dvc/repo/91479b46e9e0a07ae16a57a5b4209cf5\r\n```\r\n\n", "before_files": [{"content": "import errno\nimport logging\nimport os\nfrom collections import defaultdict\nfrom typing import Dict, List, Optional\n\nfrom dvc.repo import locked\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\ndef _path(entry):\n if entry and entry.meta and entry.meta.isdir:\n return os.path.join(*entry.key, \"\")\n return os.path.join(*entry.key)\n\n\ndef _hash(entry):\n if entry and entry.hash_info:\n return entry.hash_info.value\n return None\n\n\ndef _diff(old, new, with_missing=False):\n from dvc_data.index.diff import ADD, DELETE, MODIFY, RENAME\n from dvc_data.index.diff import diff as idiff\n\n ret: \"Dict[str, List[Dict]]\" = {\n \"added\": [],\n \"deleted\": [],\n \"modified\": [],\n \"renamed\": [],\n \"not in cache\": [],\n }\n\n for change in idiff(\n old,\n new,\n with_renames=True,\n hash_only=True,\n ):\n if change.typ == ADD:\n ret[\"added\"].append(\n {\n \"path\": _path(change.new),\n \"hash\": _hash(change.new),\n }\n )\n elif change.typ == DELETE:\n ret[\"deleted\"].append(\n {\n \"path\": _path(change.old),\n \"hash\": _hash(change.old),\n }\n )\n elif change.typ == MODIFY:\n ret[\"modified\"].append(\n {\n \"path\": _path(change.old),\n \"hash\": {\n \"old\": _hash(change.old),\n \"new\": _hash(change.new),\n },\n }\n )\n elif change.typ == RENAME:\n ret[\"renamed\"].append(\n {\n \"path\": {\n \"old\": _path(change.old),\n \"new\": _path(change.new),\n },\n \"hash\": _hash(change.old),\n }\n )\n\n if (\n with_missing\n and change.old\n and change.old.hash_info\n and not old.storage_map.cache_exists(change.old)\n ):\n ret[\"not in cache\"].append(\n {\n \"path\": _path(change.old),\n \"hash\": _hash(change.old),\n }\n )\n\n return ret if any(ret.values()) else {}\n\n\n@locked\ndef diff(\n self,\n a_rev: str = \"HEAD\",\n b_rev: Optional[str] = None,\n targets: Optional[List[str]] = None,\n recursive: bool = False,\n):\n \"\"\"\n By default, it compares the workspace with the last commit's fs.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n if self.scm.no_commits:\n return {}\n\n indexes = {}\n missing_targets = defaultdict(set)\n with_missing = False\n if not b_rev:\n b_rev = \"workspace\"\n with_missing = True\n\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and b_rev != \"workspace\":\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n\n def 
onerror(target, _exc):\n # pylint: disable-next=cell-var-from-loop\n missing_targets[rev].add(target) # noqa: B023\n\n view = self.index.targets_view(\n targets,\n onerror=onerror,\n recursive=recursive,\n )\n\n if rev == \"workspace\":\n from .index import build_data_index\n\n with ui.status(\"Building workspace index\"):\n data = build_data_index(\n view,\n self.root_dir,\n self.fs,\n compute_hash=True,\n )\n else:\n data = view.data[\"repo\"]\n\n assert rev not in indexes\n indexes[rev] = data\n\n if targets:\n old_missing = missing_targets.get(a_rev, set())\n new_missing = missing_targets.get(b_rev, set())\n\n # check for overlapping missing targets between a_rev and b_rev\n for target in old_missing & new_missing:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), target)\n\n old = indexes[a_rev]\n new = indexes[b_rev]\n\n with ui.status(\"Calculating diff\"):\n return _diff(old, new, with_missing=with_missing)\n", "path": "dvc/repo/diff.py"}]}
3,462
187
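The dvc traceback in this row comes from `brancher` yielding each distinct commit only once: when `a_rev` and `b_rev` are different branch names pointing at the same sha, `indexes` ends up with a single entry and `indexes[a_rev]` raises `KeyError`. The dependency-free sketch below shows just the guard that the golden diff introduces; `pick_old_new` and the literal dictionaries are illustrative stand-ins, not dvc APIs.

```python
def pick_old_new(indexes, a_rev, b_rev):
    # One entry means both revisions resolved to the same commit, so there is
    # nothing to compare; return empty placeholders instead of indexing blindly.
    if len(indexes) == 1:
        return None, None
    return indexes[a_rev], indexes[b_rev]

# new-main and new-2 point at the same commit: only one index was built.
assert pick_old_new({"new-2": "idx"}, "new-main", "new-2") == (None, None)
# Distinct commits keep the normal lookup path.
assert pick_old_new({"main": "a", "new-main": "b"}, "main", "new-main") == ("a", "b")
```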
gh_patches_debug_18080
rasdani/github-patches
git_diff
mozilla__bugbug-1251
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set label as 0 in the QANeeded model when one of the qa flags are removed </issue> <code> [start of bugbug/models/qaneeded.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import xgboost 7 from imblearn.under_sampling import RandomUnderSampler 8 from sklearn.compose import ColumnTransformer 9 from sklearn.feature_extraction import DictVectorizer 10 from sklearn.pipeline import Pipeline 11 12 from bugbug import bug_features, bugzilla, feature_cleanup 13 from bugbug.model import BugModel 14 15 16 class QANeededModel(BugModel): 17 def __init__(self, lemmatization=False): 18 BugModel.__init__(self, lemmatization) 19 20 self.sampler = RandomUnderSampler(random_state=0) 21 22 feature_extractors = [ 23 bug_features.has_str(), 24 bug_features.has_regression_range(), 25 bug_features.severity(), 26 bug_features.keywords({"qawanted"}), 27 bug_features.is_coverity_issue(), 28 bug_features.has_crash_signature(), 29 bug_features.has_url(), 30 bug_features.has_w3c_url(), 31 bug_features.has_github_url(), 32 bug_features.whiteboard(), 33 bug_features.patches(), 34 bug_features.landings(), 35 ] 36 37 cleanup_functions = [ 38 feature_cleanup.fileref(), 39 feature_cleanup.url(), 40 feature_cleanup.synonyms(), 41 ] 42 43 self.extraction_pipeline = Pipeline( 44 [ 45 ( 46 "bug_extractor", 47 bug_features.BugExtractor( 48 feature_extractors, 49 cleanup_functions, 50 rollback=True, 51 rollback_when=self.rollback, 52 ), 53 ), 54 ( 55 "union", 56 ColumnTransformer( 57 [ 58 ("data", DictVectorizer(), "data"), 59 ("title", self.text_vectorizer(), "title"), 60 ("comments", self.text_vectorizer(), "comments"), 61 ] 62 ), 63 ), 64 ] 65 ) 66 67 self.clf = xgboost.XGBClassifier(n_jobs=16) 68 self.clf.set_params(predictor="cpu_predictor") 69 70 def rollback(self, change): 71 return any( 72 change["added"].startswith(prefix) 73 for prefix in ["qawanted", "qe-verify", "qaurgent"] 74 ) 75 76 def get_labels(self): 77 classes = {} 78 79 for bug_data in bugzilla.get_bugs(): 80 bug_id = int(bug_data["id"]) 81 82 found_qa = False 83 if any( 84 keyword.startswith(label) 85 for keyword in bug_data["keywords"] 86 for label in ["qawanted", "qe-verify", "qaurgent"] 87 ): 88 classes[bug_id] = 1 89 found_qa = True 90 91 if not found_qa: 92 for entry in bug_data["history"]: 93 for change in entry["changes"]: 94 if any( 95 change["added"].startswith(label) 96 for label in ["qawanted", "qe-verify", "qaurgent"] 97 ): 98 classes[bug_id] = 1 99 if bug_id not in classes: 100 classes[bug_id] = 0 101 102 return classes, [0, 1] 103 104 def get_feature_names(self): 105 return self.extraction_pipeline.named_steps["union"].get_feature_names() 106 [end of bugbug/models/qaneeded.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py --- a/bugbug/models/qaneeded.py +++ b/bugbug/models/qaneeded.py @@ -91,11 +91,18 @@ if not found_qa: for entry in bug_data["history"]: for change in entry["changes"]: + if any( + change["removed"].startswith(label) + for label in ["qawanted", "qe-verify", "qaurgent"] + ): + classes[bug_id] = 0 + if any( change["added"].startswith(label) for label in ["qawanted", "qe-verify", "qaurgent"] ): classes[bug_id] = 1 + if bug_id not in classes: classes[bug_id] = 0
{"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -91,11 +91,18 @@\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n+ if any(\n+ change[\"removed\"].startswith(label)\n+ for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n+ ):\n+ classes[bug_id] = 0\n+\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n+\n if bug_id not in classes:\n classes[bug_id] = 0\n", "issue": "Set label as 0 in the QANeeded model when one of the qa flags are removed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass QANeededModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({\"qawanted\"}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors,\n cleanup_functions,\n rollback=True,\n rollback_when=self.rollback,\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def rollback(self, change):\n return any(\n change[\"added\"].startswith(prefix)\n for prefix in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n )\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data[\"id\"])\n\n found_qa = False\n if any(\n keyword.startswith(label)\n for keyword in bug_data[\"keywords\"]\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n found_qa = True\n\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/qaneeded.py"}]}
1,456
193
gh_patches_debug_6622
rasdani/github-patches
git_diff
napari__napari-5467
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> napari 0.4.17 fails with QtPoll error on a virgin install on windows. Works on the nightly build ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ## To Reproduce Steps to reproduce the behavior: 1. I am running the code with Python 3.10.9. 2. Created a new venv to ensure that we don't have any dead dependencies lying around. 3. Ran <b>pip install "napari[pyqt5]" </b> to install the current release version of napari. 4. Attempted to run napari. The code fails with the error stack below. It also fails if I run napari -info. The stack trace is below <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> (virginpip) C:\Users\agovi>napari -info Traceback (most recent call last): File "C:\Users\agovi\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "C:\Users\agovi\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "C:\Users\agovi\.virtualenvs\virginpip\Scripts\napari.exe\__main__.py", line 7, in <module> File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\__main__.py", line 561, in main _run() File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\__main__.py", line 327, in _run viewer = Viewer() File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\viewer.py", line 67, in __init__ self._window = Window(self, show=show) File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\_qt\qt_main_window.py", line 463, in __init__ self._qt_window = _QtMainWindow(viewer) File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\_qt\qt_main_window.py", line 88, in __init__ self._qt_viewer = QtViewer(viewer, show_welcome_screen=True) File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\_qt\qt_viewer.py", line 269, in __init__ self._qt_poll = _create_qt_poll(self, self.viewer.camera) File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\_qt\qt_viewer.py", line 1254, in _create_qt_poll qt_poll = QtPoll(parent) File "C:\Users\agovi\.virtualenvs\virginpip\lib\site-packages\napari\_qt\experimental\qt_poll.py", line 55, in __init__ self.timer.setInterval(POLL_INTERVAL_MS) TypeError: setInterval(self, int): argument 1 has unexpected type 'float' ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> Expected napari to work correctly. This fails with this version of napari. The previous and the current nightly build are working correctly. Have a similar problem with other users. Have included requirements.txt file that I generated with this report. ## Environment - Cannot get the napari up to show the napari info above. Have extracted the napari version from the pip requirements.txt file. - Any other relevant information: ## Additional context <!-- Add any other context about the problem here. --> This is on a windows machine. [requirements.txt](https://github.com/napari/napari/files/10249367/requirements.txt) </issue> <code> [start of napari/_qt/experimental/qt_poll.py] 1 """QtPoll class. 2 3 Poll visuals or other objects so they can do things even when the 4 mouse/camera are not moving. Usually for just a short period of time. 
5 """ 6 import time 7 from typing import Optional 8 9 from qtpy.QtCore import QEvent, QObject, QTimer 10 11 from napari.utils.events import EmitterGroup 12 13 # When running a timer we use this interval. 14 POLL_INTERVAL_MS = 16.666 # About 60HZ 15 16 # If called more often than this we ignore it. Our _on_camera() method can 17 # be called multiple times in on frame. It can get called because the 18 # "center" changed and then the "zoom" changed even if it was really from 19 # the same camera movement. 20 IGNORE_INTERVAL_MS = 10 21 22 23 class QtPoll(QObject): 24 """Polls anything once per frame via an event. 25 26 QtPoll was first created for VispyTiledImageLayer. It polls the visual 27 when the camera moves. However, we also want visuals to keep loading 28 chunks even when the camera stops. We want the visual to finish up 29 anything that was in progress. Before it goes fully idle. 30 31 QtPoll will poll those visuals using a timer. If the visual says the 32 event was "handled" it means the visual has more work to do. If that 33 happens, QtPoll will continue to poll and draw the visual it until the 34 visual is done with the in-progress work. 35 36 An analogy is a snow globe. The user moving the camera shakes up the 37 snow globe. We need to keep polling/drawing things until all the snow 38 settles down. Then everything will stay completely still until the 39 camera is moved again, shaking up the globe once more. 40 41 Parameters 42 ---------- 43 parent : QObject 44 Parent Qt object. 45 camera : Camera 46 The viewer's main camera. 47 """ 48 49 def __init__(self, parent: QObject): 50 super().__init__(parent) 51 52 self.events = EmitterGroup(source=self, poll=None) 53 54 self.timer = QTimer() 55 self.timer.setInterval(POLL_INTERVAL_MS) 56 self.timer.timeout.connect(self._on_timer) 57 self._interval = IntervalTimer() 58 59 def on_camera(self) -> None: 60 """Called when camera view changes.""" 61 # When the mouse button is down and the camera is being zoomed 62 # or panned, timer events are starved out. So we call poll 63 # explicitly here. It will start the timer if needed so that 64 # polling can continue even after the camera stops moving. 65 self._poll() 66 67 def wake_up(self) -> None: 68 """Wake up QtPoll so it starts polling.""" 69 # Start the timer so that we start polling. We used to poll once 70 # right away here, but it led to crashes. Because we polled during 71 # a paintGL event? 72 if not self.timer.isActive(): 73 self.timer.start() 74 75 def _on_timer(self) -> None: 76 """Called when the timer is running. 77 78 The timer is running which means someone we are polling still has 79 work to do. 80 """ 81 self._poll() 82 83 def _poll(self) -> None: 84 """Called on camera move or with the timer.""" 85 86 # Between timers and camera and wake_up() we might be called multiple 87 # times in quick succession. Use an IntervalTimer to ignore these 88 # near-duplicate calls. 89 if self._interval.elapsed_ms < IGNORE_INTERVAL_MS: 90 return 91 92 # Poll all listeners. 93 event = self.events.poll() 94 95 # Listeners will "handle" the event if they need more polling. If 96 # no one needs polling, then we can stop the timer. 97 if not event.handled: 98 self.timer.stop() 99 return 100 101 # Someone handled the event, so they want to be polled even if 102 # the mouse doesn't move. So start the timer if needed. 103 if not self.timer.isActive(): 104 self.timer.start() 105 106 def closeEvent(self, _event: QEvent) -> None: 107 """Cleanup and close. 
108 109 Parameters 110 ---------- 111 _event : QEvent 112 The close event. 113 """ 114 self.timer.stop() 115 self.deleteLater() 116 117 118 class IntervalTimer: 119 """Time the interval between subsequent calls to our elapsed property.""" 120 121 def __init__(self): 122 self._last: Optional[float] = None 123 124 @property 125 def elapsed_ms(self) -> float: 126 """The elapsed time since the last call to this property.""" 127 now = time.time() 128 elapsed_seconds = 0 if self._last is None else now - self._last 129 self._last = now 130 return elapsed_seconds * 1000 131 [end of napari/_qt/experimental/qt_poll.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/_qt/experimental/qt_poll.py b/napari/_qt/experimental/qt_poll.py --- a/napari/_qt/experimental/qt_poll.py +++ b/napari/_qt/experimental/qt_poll.py @@ -11,7 +11,7 @@ from napari.utils.events import EmitterGroup # When running a timer we use this interval. -POLL_INTERVAL_MS = 16.666 # About 60HZ +POLL_INTERVAL_MS = 16 # About 60HZ, needs to be an int for QTimer setInterval # If called more often than this we ignore it. Our _on_camera() method can # be called multiple times in on frame. It can get called because the
{"golden_diff": "diff --git a/napari/_qt/experimental/qt_poll.py b/napari/_qt/experimental/qt_poll.py\n--- a/napari/_qt/experimental/qt_poll.py\n+++ b/napari/_qt/experimental/qt_poll.py\n@@ -11,7 +11,7 @@\n from napari.utils.events import EmitterGroup\n \n # When running a timer we use this interval.\n-POLL_INTERVAL_MS = 16.666 # About 60HZ\n+POLL_INTERVAL_MS = 16 # About 60HZ, needs to be an int for QTimer setInterval\n \n # If called more often than this we ignore it. Our _on_camera() method can\n # be called multiple times in on frame. It can get called because the\n", "issue": "napari 0.4.17 fails with QtPoll error on a virgin install on windows. Works on the nightly build\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. I am running the code with Python 3.10.9.\r\n2. Created a new venv to ensure that we don't have any dead dependencies lying around.\r\n3. Ran <b>pip install \"napari[pyqt5]\" </b> to install the current release version of napari.\r\n4. Attempted to run napari. The code fails with the error stack below. It also fails if I run napari -info. The stack trace is below\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n(virginpip) C:\\Users\\agovi>napari -info\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\agovi\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Users\\agovi\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\Scripts\\napari.exe\\__main__.py\", line 7, in <module>\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\__main__.py\", line 561, in main\r\n _run()\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\__main__.py\", line 327, in _run\r\n viewer = Viewer()\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\viewer.py\", line 67, in __init__\r\n self._window = Window(self, show=show)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\_qt\\qt_main_window.py\", line 463, in __init__\r\n self._qt_window = _QtMainWindow(viewer)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\_qt\\qt_main_window.py\", line 88, in __init__\r\n self._qt_viewer = QtViewer(viewer, show_welcome_screen=True)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\_qt\\qt_viewer.py\", line 269, in __init__\r\n self._qt_poll = _create_qt_poll(self, self.viewer.camera)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\_qt\\qt_viewer.py\", line 1254, in _create_qt_poll\r\n qt_poll = QtPoll(parent)\r\n File \"C:\\Users\\agovi\\.virtualenvs\\virginpip\\lib\\site-packages\\napari\\_qt\\experimental\\qt_poll.py\", line 55, in __init__\r\n self.timer.setInterval(POLL_INTERVAL_MS)\r\nTypeError: setInterval(self, int): argument 1 has unexpected type 'float'\r\n\r\n\r\n## Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nExpected napari to work correctly. This fails with this version of napari. The previous and the current nightly build are working correctly. \r\nHave a similar problem with other users. 
Have included requirements.txt file that I generated with this report. \r\n\r\n## Environment\r\n\r\n - Cannot get the napari up to show the napari info above. Have extracted the napari version from the pip requirements.txt file. \r\n\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\nThis is on a windows machine. \r\n[requirements.txt](https://github.com/napari/napari/files/10249367/requirements.txt)\r\n\n", "before_files": [{"content": "\"\"\"QtPoll class.\n\nPoll visuals or other objects so they can do things even when the\nmouse/camera are not moving. Usually for just a short period of time.\n\"\"\"\nimport time\nfrom typing import Optional\n\nfrom qtpy.QtCore import QEvent, QObject, QTimer\n\nfrom napari.utils.events import EmitterGroup\n\n# When running a timer we use this interval.\nPOLL_INTERVAL_MS = 16.666 # About 60HZ\n\n# If called more often than this we ignore it. Our _on_camera() method can\n# be called multiple times in on frame. It can get called because the\n# \"center\" changed and then the \"zoom\" changed even if it was really from\n# the same camera movement.\nIGNORE_INTERVAL_MS = 10\n\n\nclass QtPoll(QObject):\n \"\"\"Polls anything once per frame via an event.\n\n QtPoll was first created for VispyTiledImageLayer. It polls the visual\n when the camera moves. However, we also want visuals to keep loading\n chunks even when the camera stops. We want the visual to finish up\n anything that was in progress. Before it goes fully idle.\n\n QtPoll will poll those visuals using a timer. If the visual says the\n event was \"handled\" it means the visual has more work to do. If that\n happens, QtPoll will continue to poll and draw the visual it until the\n visual is done with the in-progress work.\n\n An analogy is a snow globe. The user moving the camera shakes up the\n snow globe. We need to keep polling/drawing things until all the snow\n settles down. Then everything will stay completely still until the\n camera is moved again, shaking up the globe once more.\n\n Parameters\n ----------\n parent : QObject\n Parent Qt object.\n camera : Camera\n The viewer's main camera.\n \"\"\"\n\n def __init__(self, parent: QObject):\n super().__init__(parent)\n\n self.events = EmitterGroup(source=self, poll=None)\n\n self.timer = QTimer()\n self.timer.setInterval(POLL_INTERVAL_MS)\n self.timer.timeout.connect(self._on_timer)\n self._interval = IntervalTimer()\n\n def on_camera(self) -> None:\n \"\"\"Called when camera view changes.\"\"\"\n # When the mouse button is down and the camera is being zoomed\n # or panned, timer events are starved out. So we call poll\n # explicitly here. It will start the timer if needed so that\n # polling can continue even after the camera stops moving.\n self._poll()\n\n def wake_up(self) -> None:\n \"\"\"Wake up QtPoll so it starts polling.\"\"\"\n # Start the timer so that we start polling. We used to poll once\n # right away here, but it led to crashes. Because we polled during\n # a paintGL event?\n if not self.timer.isActive():\n self.timer.start()\n\n def _on_timer(self) -> None:\n \"\"\"Called when the timer is running.\n\n The timer is running which means someone we are polling still has\n work to do.\n \"\"\"\n self._poll()\n\n def _poll(self) -> None:\n \"\"\"Called on camera move or with the timer.\"\"\"\n\n # Between timers and camera and wake_up() we might be called multiple\n # times in quick succession. 
Use an IntervalTimer to ignore these\n # near-duplicate calls.\n if self._interval.elapsed_ms < IGNORE_INTERVAL_MS:\n return\n\n # Poll all listeners.\n event = self.events.poll()\n\n # Listeners will \"handle\" the event if they need more polling. If\n # no one needs polling, then we can stop the timer.\n if not event.handled:\n self.timer.stop()\n return\n\n # Someone handled the event, so they want to be polled even if\n # the mouse doesn't move. So start the timer if needed.\n if not self.timer.isActive():\n self.timer.start()\n\n def closeEvent(self, _event: QEvent) -> None:\n \"\"\"Cleanup and close.\n\n Parameters\n ----------\n _event : QEvent\n The close event.\n \"\"\"\n self.timer.stop()\n self.deleteLater()\n\n\nclass IntervalTimer:\n \"\"\"Time the interval between subsequent calls to our elapsed property.\"\"\"\n\n def __init__(self):\n self._last: Optional[float] = None\n\n @property\n def elapsed_ms(self) -> float:\n \"\"\"The elapsed time since the last call to this property.\"\"\"\n now = time.time()\n elapsed_seconds = 0 if self._last is None else now - self._last\n self._last = now\n return elapsed_seconds * 1000\n", "path": "napari/_qt/experimental/qt_poll.py"}]}
num_tokens_prompt: 2,788
num_tokens_diff: 165

problem_id: gh_patches_debug_30798
source: rasdani/github-patches
task_type: git_diff
in_source_id: matrix-org__synapse-3914
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Split fed worker & sender config means destination retry sched doesn't invalidate when you receive traffic </issue> <code> [start of synapse/storage/transactions.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2014-2016 OpenMarket Ltd 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import logging 17 from collections import namedtuple 18 19 import six 20 21 from canonicaljson import encode_canonical_json 22 23 from twisted.internet import defer 24 25 from synapse.metrics.background_process_metrics import run_as_background_process 26 from synapse.util.caches.descriptors import cached 27 28 from ._base import SQLBaseStore, db_to_json 29 30 # py2 sqlite has buffer hardcoded as only binary type, so we must use it, 31 # despite being deprecated and removed in favor of memoryview 32 if six.PY2: 33 db_binary_type = buffer 34 else: 35 db_binary_type = memoryview 36 37 logger = logging.getLogger(__name__) 38 39 40 _TransactionRow = namedtuple( 41 "_TransactionRow", ( 42 "id", "transaction_id", "destination", "ts", "response_code", 43 "response_json", 44 ) 45 ) 46 47 _UpdateTransactionRow = namedtuple( 48 "_TransactionRow", ( 49 "response_code", "response_json", 50 ) 51 ) 52 53 54 class TransactionStore(SQLBaseStore): 55 """A collection of queries for handling PDUs. 56 """ 57 58 def __init__(self, db_conn, hs): 59 super(TransactionStore, self).__init__(db_conn, hs) 60 61 self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) 62 63 def get_received_txn_response(self, transaction_id, origin): 64 """For an incoming transaction from a given origin, check if we have 65 already responded to it. If so, return the response code and response 66 body (as a dict). 67 68 Args: 69 transaction_id (str) 70 origin(str) 71 72 Returns: 73 tuple: None if we have not previously responded to 74 this transaction or a 2-tuple of (int, dict) 75 """ 76 77 return self.runInteraction( 78 "get_received_txn_response", 79 self._get_received_txn_response, transaction_id, origin 80 ) 81 82 def _get_received_txn_response(self, txn, transaction_id, origin): 83 result = self._simple_select_one_txn( 84 txn, 85 table="received_transactions", 86 keyvalues={ 87 "transaction_id": transaction_id, 88 "origin": origin, 89 }, 90 retcols=( 91 "transaction_id", "origin", "ts", "response_code", "response_json", 92 "has_been_referenced", 93 ), 94 allow_none=True, 95 ) 96 97 if result and result["response_code"]: 98 return result["response_code"], db_to_json(result["response_json"]) 99 100 else: 101 return None 102 103 def set_received_txn_response(self, transaction_id, origin, code, 104 response_dict): 105 """Persist the response we returened for an incoming transaction, and 106 should return for subsequent transactions with the same transaction_id 107 and origin. 
108 109 Args: 110 txn 111 transaction_id (str) 112 origin (str) 113 code (int) 114 response_json (str) 115 """ 116 117 return self._simple_insert( 118 table="received_transactions", 119 values={ 120 "transaction_id": transaction_id, 121 "origin": origin, 122 "response_code": code, 123 "response_json": db_binary_type(encode_canonical_json(response_dict)), 124 "ts": self._clock.time_msec(), 125 }, 126 or_ignore=True, 127 desc="set_received_txn_response", 128 ) 129 130 def prep_send_transaction(self, transaction_id, destination, 131 origin_server_ts): 132 """Persists an outgoing transaction and calculates the values for the 133 previous transaction id list. 134 135 This should be called before sending the transaction so that it has the 136 correct value for the `prev_ids` key. 137 138 Args: 139 transaction_id (str) 140 destination (str) 141 origin_server_ts (int) 142 143 Returns: 144 list: A list of previous transaction ids. 145 """ 146 return defer.succeed([]) 147 148 def delivered_txn(self, transaction_id, destination, code, response_dict): 149 """Persists the response for an outgoing transaction. 150 151 Args: 152 transaction_id (str) 153 destination (str) 154 code (int) 155 response_json (str) 156 """ 157 pass 158 159 @cached(max_entries=10000) 160 def get_destination_retry_timings(self, destination): 161 """Gets the current retry timings (if any) for a given destination. 162 163 Args: 164 destination (str) 165 166 Returns: 167 None if not retrying 168 Otherwise a dict for the retry scheme 169 """ 170 return self.runInteraction( 171 "get_destination_retry_timings", 172 self._get_destination_retry_timings, destination) 173 174 def _get_destination_retry_timings(self, txn, destination): 175 result = self._simple_select_one_txn( 176 txn, 177 table="destinations", 178 keyvalues={ 179 "destination": destination, 180 }, 181 retcols=("destination", "retry_last_ts", "retry_interval"), 182 allow_none=True, 183 ) 184 185 if result and result["retry_last_ts"] > 0: 186 return result 187 else: 188 return None 189 190 def set_destination_retry_timings(self, destination, 191 retry_last_ts, retry_interval): 192 """Sets the current retry timings for a given destination. 193 Both timings should be zero if retrying is no longer occuring. 194 195 Args: 196 destination (str) 197 retry_last_ts (int) - time of last retry attempt in unix epoch ms 198 retry_interval (int) - how long until next retry in ms 199 """ 200 201 # XXX: we could chose to not bother persisting this if our cache thinks 202 # this is a NOOP 203 return self.runInteraction( 204 "set_destination_retry_timings", 205 self._set_destination_retry_timings, 206 destination, 207 retry_last_ts, 208 retry_interval, 209 ) 210 211 def _set_destination_retry_timings(self, txn, destination, 212 retry_last_ts, retry_interval): 213 self.database_engine.lock_table(txn, "destinations") 214 215 self._invalidate_cache_and_stream( 216 txn, self.get_destination_retry_timings, (destination,) 217 ) 218 219 # We need to be careful here as the data may have changed from under us 220 # due to a worker setting the timings. 
221 222 prev_row = self._simple_select_one_txn( 223 txn, 224 table="destinations", 225 keyvalues={ 226 "destination": destination, 227 }, 228 retcols=("retry_last_ts", "retry_interval"), 229 allow_none=True, 230 ) 231 232 if not prev_row: 233 self._simple_insert_txn( 234 txn, 235 table="destinations", 236 values={ 237 "destination": destination, 238 "retry_last_ts": retry_last_ts, 239 "retry_interval": retry_interval, 240 } 241 ) 242 elif retry_interval == 0 or prev_row["retry_interval"] < retry_interval: 243 self._simple_update_one_txn( 244 txn, 245 "destinations", 246 keyvalues={ 247 "destination": destination, 248 }, 249 updatevalues={ 250 "retry_last_ts": retry_last_ts, 251 "retry_interval": retry_interval, 252 }, 253 ) 254 255 def get_destinations_needing_retry(self): 256 """Get all destinations which are due a retry for sending a transaction. 257 258 Returns: 259 list: A list of dicts 260 """ 261 262 return self.runInteraction( 263 "get_destinations_needing_retry", 264 self._get_destinations_needing_retry 265 ) 266 267 def _get_destinations_needing_retry(self, txn): 268 query = ( 269 "SELECT * FROM destinations" 270 " WHERE retry_last_ts > 0 and retry_next_ts < ?" 271 ) 272 273 txn.execute(query, (self._clock.time_msec(),)) 274 return self.cursor_to_dict(txn) 275 276 def _start_cleanup_transactions(self): 277 return run_as_background_process( 278 "cleanup_transactions", self._cleanup_transactions, 279 ) 280 281 def _cleanup_transactions(self): 282 now = self._clock.time_msec() 283 month_ago = now - 30 * 24 * 60 * 60 * 1000 284 285 def _cleanup_transactions_txn(txn): 286 txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) 287 288 return self.runInteraction("_cleanup_transactions", _cleanup_transactions_txn) 289 [end of synapse/storage/transactions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -23,7 +23,6 @@ from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.caches.descriptors import cached from ._base import SQLBaseStore, db_to_json @@ -156,7 +155,6 @@ """ pass - @cached(max_entries=10000) def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. @@ -198,8 +196,6 @@ retry_interval (int) - how long until next retry in ms """ - # XXX: we could chose to not bother persisting this if our cache thinks - # this is a NOOP return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, @@ -212,10 +208,6 @@ retry_last_ts, retry_interval): self.database_engine.lock_table(txn, "destinations") - self._invalidate_cache_and_stream( - txn, self.get_destination_retry_timings, (destination,) - ) - # We need to be careful here as the data may have changed from under us # due to a worker setting the timings.
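The patch reads more naturally with the deployment model in mind: the federation sender and the federation receiver can run as separate worker processes, and `@cached` is a per-process in-memory cache, so a retry schedule written by one worker stays invisible to the other until its cache is invalidated. A toy illustration of that staleness in plain Python (not Synapse code, just the caching pattern):

```python
class RetryStore:
    """Per-process cache in front of a shared database (here just a dict)."""

    def __init__(self, db):
        self.db = db        # shared between "workers"
        self._cache = {}    # private to this instance, like @cached is to a process

    def get_retry_timings(self, destination):
        if destination not in self._cache:
            self._cache[destination] = self.db.get(destination)
        return self._cache[destination]

    def set_retry_timings(self, destination, timings):
        self.db[destination] = timings
        self._cache[destination] = timings  # only *this* worker's cache is updated


db = {}
sender = RetryStore(db)    # federation sender worker
receiver = RetryStore(db)  # federation reader worker

sender.set_retry_timings("remote.example", {"retry_interval": 60_000})
print(receiver.get_retry_timings("remote.example"))  # fresh: first read goes to the DB

receiver.set_retry_timings("remote.example", None)   # traffic arrived: clear the backoff
print(sender.get_retry_timings("remote.example"))    # stale: still {'retry_interval': 60000}
```

Removing the decorator, as the diff does, trades a few extra database reads for timings that always reflect the latest write, whichever worker made it.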
{"golden_diff": "diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py\n--- a/synapse/storage/transactions.py\n+++ b/synapse/storage/transactions.py\n@@ -23,7 +23,6 @@\n from twisted.internet import defer\n \n from synapse.metrics.background_process_metrics import run_as_background_process\n-from synapse.util.caches.descriptors import cached\n \n from ._base import SQLBaseStore, db_to_json\n \n@@ -156,7 +155,6 @@\n \"\"\"\n pass\n \n- @cached(max_entries=10000)\n def get_destination_retry_timings(self, destination):\n \"\"\"Gets the current retry timings (if any) for a given destination.\n \n@@ -198,8 +196,6 @@\n retry_interval (int) - how long until next retry in ms\n \"\"\"\n \n- # XXX: we could chose to not bother persisting this if our cache thinks\n- # this is a NOOP\n return self.runInteraction(\n \"set_destination_retry_timings\",\n self._set_destination_retry_timings,\n@@ -212,10 +208,6 @@\n retry_last_ts, retry_interval):\n self.database_engine.lock_table(txn, \"destinations\")\n \n- self._invalidate_cache_and_stream(\n- txn, self.get_destination_retry_timings, (destination,)\n- )\n-\n # We need to be careful here as the data may have changed from under us\n # due to a worker setting the timings.\n", "issue": "Split fed worker & sender config means destination retry sched doesn't invalidate when you receive traffic\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom collections import namedtuple\n\nimport six\n\nfrom canonicaljson import encode_canonical_json\n\nfrom twisted.internet import defer\n\nfrom synapse.metrics.background_process_metrics import run_as_background_process\nfrom synapse.util.caches.descriptors import cached\n\nfrom ._base import SQLBaseStore, db_to_json\n\n# py2 sqlite has buffer hardcoded as only binary type, so we must use it,\n# despite being deprecated and removed in favor of memoryview\nif six.PY2:\n db_binary_type = buffer\nelse:\n db_binary_type = memoryview\n\nlogger = logging.getLogger(__name__)\n\n\n_TransactionRow = namedtuple(\n \"_TransactionRow\", (\n \"id\", \"transaction_id\", \"destination\", \"ts\", \"response_code\",\n \"response_json\",\n )\n)\n\n_UpdateTransactionRow = namedtuple(\n \"_TransactionRow\", (\n \"response_code\", \"response_json\",\n )\n)\n\n\nclass TransactionStore(SQLBaseStore):\n \"\"\"A collection of queries for handling PDUs.\n \"\"\"\n\n def __init__(self, db_conn, hs):\n super(TransactionStore, self).__init__(db_conn, hs)\n\n self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000)\n\n def get_received_txn_response(self, transaction_id, origin):\n \"\"\"For an incoming transaction from a given origin, check if we have\n already responded to it. 
If so, return the response code and response\n body (as a dict).\n\n Args:\n transaction_id (str)\n origin(str)\n\n Returns:\n tuple: None if we have not previously responded to\n this transaction or a 2-tuple of (int, dict)\n \"\"\"\n\n return self.runInteraction(\n \"get_received_txn_response\",\n self._get_received_txn_response, transaction_id, origin\n )\n\n def _get_received_txn_response(self, txn, transaction_id, origin):\n result = self._simple_select_one_txn(\n txn,\n table=\"received_transactions\",\n keyvalues={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n },\n retcols=(\n \"transaction_id\", \"origin\", \"ts\", \"response_code\", \"response_json\",\n \"has_been_referenced\",\n ),\n allow_none=True,\n )\n\n if result and result[\"response_code\"]:\n return result[\"response_code\"], db_to_json(result[\"response_json\"])\n\n else:\n return None\n\n def set_received_txn_response(self, transaction_id, origin, code,\n response_dict):\n \"\"\"Persist the response we returened for an incoming transaction, and\n should return for subsequent transactions with the same transaction_id\n and origin.\n\n Args:\n txn\n transaction_id (str)\n origin (str)\n code (int)\n response_json (str)\n \"\"\"\n\n return self._simple_insert(\n table=\"received_transactions\",\n values={\n \"transaction_id\": transaction_id,\n \"origin\": origin,\n \"response_code\": code,\n \"response_json\": db_binary_type(encode_canonical_json(response_dict)),\n \"ts\": self._clock.time_msec(),\n },\n or_ignore=True,\n desc=\"set_received_txn_response\",\n )\n\n def prep_send_transaction(self, transaction_id, destination,\n origin_server_ts):\n \"\"\"Persists an outgoing transaction and calculates the values for the\n previous transaction id list.\n\n This should be called before sending the transaction so that it has the\n correct value for the `prev_ids` key.\n\n Args:\n transaction_id (str)\n destination (str)\n origin_server_ts (int)\n\n Returns:\n list: A list of previous transaction ids.\n \"\"\"\n return defer.succeed([])\n\n def delivered_txn(self, transaction_id, destination, code, response_dict):\n \"\"\"Persists the response for an outgoing transaction.\n\n Args:\n transaction_id (str)\n destination (str)\n code (int)\n response_json (str)\n \"\"\"\n pass\n\n @cached(max_entries=10000)\n def get_destination_retry_timings(self, destination):\n \"\"\"Gets the current retry timings (if any) for a given destination.\n\n Args:\n destination (str)\n\n Returns:\n None if not retrying\n Otherwise a dict for the retry scheme\n \"\"\"\n return self.runInteraction(\n \"get_destination_retry_timings\",\n self._get_destination_retry_timings, destination)\n\n def _get_destination_retry_timings(self, txn, destination):\n result = self._simple_select_one_txn(\n txn,\n table=\"destinations\",\n keyvalues={\n \"destination\": destination,\n },\n retcols=(\"destination\", \"retry_last_ts\", \"retry_interval\"),\n allow_none=True,\n )\n\n if result and result[\"retry_last_ts\"] > 0:\n return result\n else:\n return None\n\n def set_destination_retry_timings(self, destination,\n retry_last_ts, retry_interval):\n \"\"\"Sets the current retry timings for a given destination.\n Both timings should be zero if retrying is no longer occuring.\n\n Args:\n destination (str)\n retry_last_ts (int) - time of last retry attempt in unix epoch ms\n retry_interval (int) - how long until next retry in ms\n \"\"\"\n\n # XXX: we could chose to not bother persisting this if our cache thinks\n # this is a NOOP\n return 
self.runInteraction(\n \"set_destination_retry_timings\",\n self._set_destination_retry_timings,\n destination,\n retry_last_ts,\n retry_interval,\n )\n\n def _set_destination_retry_timings(self, txn, destination,\n retry_last_ts, retry_interval):\n self.database_engine.lock_table(txn, \"destinations\")\n\n self._invalidate_cache_and_stream(\n txn, self.get_destination_retry_timings, (destination,)\n )\n\n # We need to be careful here as the data may have changed from under us\n # due to a worker setting the timings.\n\n prev_row = self._simple_select_one_txn(\n txn,\n table=\"destinations\",\n keyvalues={\n \"destination\": destination,\n },\n retcols=(\"retry_last_ts\", \"retry_interval\"),\n allow_none=True,\n )\n\n if not prev_row:\n self._simple_insert_txn(\n txn,\n table=\"destinations\",\n values={\n \"destination\": destination,\n \"retry_last_ts\": retry_last_ts,\n \"retry_interval\": retry_interval,\n }\n )\n elif retry_interval == 0 or prev_row[\"retry_interval\"] < retry_interval:\n self._simple_update_one_txn(\n txn,\n \"destinations\",\n keyvalues={\n \"destination\": destination,\n },\n updatevalues={\n \"retry_last_ts\": retry_last_ts,\n \"retry_interval\": retry_interval,\n },\n )\n\n def get_destinations_needing_retry(self):\n \"\"\"Get all destinations which are due a retry for sending a transaction.\n\n Returns:\n list: A list of dicts\n \"\"\"\n\n return self.runInteraction(\n \"get_destinations_needing_retry\",\n self._get_destinations_needing_retry\n )\n\n def _get_destinations_needing_retry(self, txn):\n query = (\n \"SELECT * FROM destinations\"\n \" WHERE retry_last_ts > 0 and retry_next_ts < ?\"\n )\n\n txn.execute(query, (self._clock.time_msec(),))\n return self.cursor_to_dict(txn)\n\n def _start_cleanup_transactions(self):\n return run_as_background_process(\n \"cleanup_transactions\", self._cleanup_transactions,\n )\n\n def _cleanup_transactions(self):\n now = self._clock.time_msec()\n month_ago = now - 30 * 24 * 60 * 60 * 1000\n\n def _cleanup_transactions_txn(txn):\n txn.execute(\"DELETE FROM received_transactions WHERE ts < ?\", (month_ago,))\n\n return self.runInteraction(\"_cleanup_transactions\", _cleanup_transactions_txn)\n", "path": "synapse/storage/transactions.py"}]}
num_tokens_prompt: 3,186
num_tokens_diff: 329

problem_id: gh_patches_debug_18041
source: rasdani/github-patches
task_type: git_diff
in_source_id: digitalfabrik__integreat-cms-242
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add additional linking / features to TinyMCE - [ ] Add functionality to link button: when an email-address is marked, the link should be generated automatically with mailto:, when telephone number it shoudl add tel:, adding a website should add https:// - [ ] Add functionality to insert internal links (choose from a list or 'type to search' - [x] CSS class to add background color to notranslate tags - [x] Source Code view available in two option menus - [x] Menus allow access to forbidden functions (align left/right/justify) - [ ] Dropdown should only contain headings </issue> <code> [start of backend/cms/models/page.py] 1 """Models representing a page and page translation with content 2 """ 3 4 import logging 5 6 from django.db import models 7 from django.conf import settings 8 from django.urls import reverse 9 from django.utils import timezone 10 from django.utils.translation import get_language 11 12 from mptt.models import MPTTModel, TreeForeignKey 13 14 from .language import Language, LanguageTreeNode 15 from .region import Region 16 from ..constants import status 17 18 19 logger = logging.getLogger(__name__) 20 21 22 class Page(MPTTModel): 23 """Class that represents an Page database object 24 25 Args: 26 MPTTModel : Library for hierachical data structures 27 """ 28 29 parent = TreeForeignKey( 30 'self', 31 blank=True, 32 null=True, 33 related_name='children', 34 on_delete=models.PROTECT 35 ) 36 icon = models.ImageField( 37 blank=True, 38 null=True, 39 upload_to='pages/%Y/%m/%d' 40 ) 41 region = models.ForeignKey(Region, related_name='pages', on_delete=models.CASCADE) 42 archived = models.BooleanField(default=False) 43 mirrored_page = models.ForeignKey('self', null=True, blank=True, on_delete=models.PROTECT) 44 mirrored_page_first = models.BooleanField(default=True) 45 editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='editors', blank=True) 46 publishers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='publishers', blank=True) 47 created_date = models.DateTimeField(default=timezone.now) 48 last_updated = models.DateTimeField(auto_now=True) 49 50 @property 51 def depth(self): 52 """Provide level of inheritance 53 54 Returns: 55 Int : Number of ancestors 56 """ 57 58 return len(self.get_ancestors()) 59 60 @property 61 def languages(self): 62 page_translations = self.page_translations.prefetch_related('language').all() 63 languages = [] 64 for page_translation in page_translations: 65 if page_translation.language not in languages: 66 languages.append(page_translation.language) 67 return languages 68 69 def get_translation(self, language_code): 70 return self.page_translations.filter( 71 language__code=language_code 72 ).first() 73 74 # Helper function for page labels, second level paths etc. 
where the ancestor translation might not exist 75 def get_first_translation(self, priority_language_codes=None): 76 # Taking [] directly as default parameter would be dangerous because it is mutable 77 if not priority_language_codes: 78 priority_language_codes = [] 79 for language_code in priority_language_codes + ['en-us', 'de-de']: 80 if self.page_translations.filter(language__code=language_code).exists(): 81 return self.page_translations.filter(language__code=language_code).first() 82 return self.page_translations.first() 83 84 def get_public_translation(self, language_code): 85 return self.page_translations.filter( 86 language__code=language_code, 87 status=status.PUBLIC, 88 ).first() 89 90 def get_mirrored_text(self, language_code): 91 """ 92 This content needs to be added when delivering content to end users 93 """ 94 return self.mirrored_page.get_translation(language_code).text 95 96 def get_absolute_url(self): 97 return reverse('edit_page', kwargs={ 98 'page_id': self.id, 99 'region_slug': self.region.slug, 100 'language_code': self.region.default_language.code, 101 }) 102 103 @staticmethod 104 def get_archived(region_slug): 105 return Page.objects.filter(archived=True, region__slug=region_slug) 106 107 @staticmethod 108 def archived_count(region_slug): 109 return Page.objects.filter(archived=True, region__slug=region_slug).count() 110 111 def __str__(self): 112 first_translation = self.get_first_translation() 113 return '(id: {}, slug: {} ({}))'.format(self.id, first_translation.slug, first_translation.language.code) 114 115 @classmethod 116 def get_tree(cls, region_slug, archived=False): 117 """Function for building up a Treeview of all pages 118 119 Args: 120 region_slug: slug of the region the page belongs to 121 archived: if true archived pages will be included 122 123 Returns: 124 [pages]: Array of pages connected with their relations 125 """ 126 127 if archived: 128 pages = cls.objects.all().prefetch_related( 129 'page_translations' 130 ).filter( 131 region__slug=region_slug 132 ) 133 else: 134 pages = cls.objects.all().prefetch_related( 135 'page_translations' 136 ).filter( 137 region__slug=region_slug, 138 archived=False 139 ) 140 141 return pages 142 143 def best_language_title(self): 144 page_translation = self.page_translations.filter(language__code=get_language()) 145 if not page_translation: 146 alt_code = LanguageTreeNode.objects.get(region__id=self.region.id).get_root().language.code 147 page_translation = self.page_translations.filter(language__code=alt_code) 148 return page_translation.first().title 149 150 class Meta: 151 default_permissions = () 152 permissions = ( 153 ('view_pages', 'Can view pages'), 154 ('edit_pages', 'Can edit pages'), 155 ('publish_pages', 'Can publish pages'), 156 ('grant_page_permissions', 'Can grant page permissions'), 157 ) 158 159 160 class PageTranslation(models.Model): 161 """Class defining a Translation of a Page 162 163 Args: 164 models : Class inherit of django-Models 165 """ 166 167 page = models.ForeignKey(Page, related_name='page_translations', on_delete=models.CASCADE) 168 language = models.ForeignKey( 169 Language, 170 related_name='page_translations', 171 on_delete=models.CASCADE 172 ) 173 slug = models.SlugField(max_length=200, blank=True, allow_unicode=True) 174 title = models.CharField(max_length=250) 175 text = models.TextField() 176 status = models.CharField(max_length=6, choices=status.CHOICES, default=status.DRAFT) 177 currently_in_translation = models.BooleanField(default=False) 178 version = 
models.PositiveIntegerField(default=0) 179 minor_edit = models.BooleanField(default=False) 180 creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL) 181 created_date = models.DateTimeField(default=timezone.now) 182 last_updated = models.DateTimeField(auto_now=True) 183 184 @property 185 def ancestor_path(self): 186 return '/'.join([ 187 ancestor.get_first_translation([self.language.code]).slug 188 for ancestor in self.page.get_ancestors() 189 ]) 190 191 @property 192 def permalink(self): 193 return '{}/{}/{}/{}'.format( 194 self.page.region.slug, self.language.code, self.ancestor_path, self.slug 195 ) 196 197 @property 198 def available_languages(self): 199 languages = self.page.languages 200 languages.remove(self.language) 201 available_languages = {} 202 for language in languages: 203 other_translation = self.page.get_public_translation(language.code) 204 if other_translation: 205 available_languages[language.code] = { 206 'id': other_translation.id, 207 'url': other_translation.permalink 208 } 209 return available_languages 210 211 @property 212 def source_translation(self): 213 source_language_tree_node = self.page.region.language_tree_nodes.get(language=self.language).parent 214 if source_language_tree_node: 215 return self.page.get_translation(source_language_tree_node.code) 216 return None 217 218 @property 219 def latest_public_revision(self): 220 return self.page.page_translations.filter( 221 language=self.language, 222 status=status.PUBLIC, 223 ).first() 224 225 @property 226 def latest_major_revision(self): 227 return self.page.page_translations.filter( 228 language=self.language, 229 minor_edit=False, 230 ).first() 231 232 @property 233 def latest_major_public_revision(self): 234 return self.page.page_translations.filter( 235 language=self.language, 236 status=status.PUBLIC, 237 minor_edit=False, 238 ).first() 239 240 @property 241 def previous_revision(self): 242 version = self.version - 1 243 return self.page.page_translations.filter( 244 language=self.language, 245 version=version, 246 ).first() 247 248 @property 249 def is_outdated(self): 250 source_translation = self.source_translation 251 # If self.language is the root language, this translation can never be outdated 252 if not source_translation: 253 return False 254 # If the source translation is outdated, this translation can not be up to date 255 if source_translation.is_outdated: 256 return True 257 self_revision = self.latest_major_public_revision 258 source_revision = source_translation.latest_major_public_revision 259 # If on of the translations has no major public revision, it cannot be outdated 260 if not self_revision or not source_revision: 261 return False 262 return self_revision.last_updated < source_revision.last_updated 263 264 @property 265 def combined_text(self): 266 """ 267 Combines the text from the PageTranslation with the text from the mirrored page. 268 """ 269 if self.page.mirrored_page_first: 270 return self.page.get_mirrored_text(self.language.code) + self.text 271 return self.text + self.page.get_mirrored_text(self.language.code) 272 273 def __str__(self): 274 return '(id: {}, lang: {}, slug: {})'.format(self.id, self.language.code, self.slug) 275 276 class Meta: 277 ordering = ['page', '-version'] 278 default_permissions = () 279 [end of backend/cms/models/page.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/cms/models/page.py b/backend/cms/models/page.py --- a/backend/cms/models/page.py +++ b/backend/cms/models/page.py @@ -11,7 +11,7 @@ from mptt.models import MPTTModel, TreeForeignKey -from .language import Language, LanguageTreeNode +from .language import Language from .region import Region from ..constants import status @@ -143,7 +143,7 @@ def best_language_title(self): page_translation = self.page_translations.filter(language__code=get_language()) if not page_translation: - alt_code = LanguageTreeNode.objects.get(region__id=self.region.id).get_root().language.code + alt_code = self.region.default_language.code page_translation = self.page_translations.filter(language__code=alt_code) return page_translation.first().title
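Note that the golden diff above addresses only the `best_language_title` lookup; the first unchecked item in the issue (auto-prefixing pasted link targets with `mailto:`, `tel:` or `https://`) is a separate, mostly front-end rule. A rough, self-contained sketch of that normalisation (the regexes and function name are assumptions for illustration, not integreat-cms code; the real implementation would live in or around the TinyMCE link dialog):

```python
import re


def auto_prefix_link(target: str) -> str:
    """Guess a sensible scheme for a bare link target pasted by an editor."""
    target = target.strip()
    if re.fullmatch(r"[^@\s]+@[^@\s]+\.[^@\s]+", target):   # looks like an email address
        return f"mailto:{target}"
    if re.fullmatch(r"\+?[0-9 ()/-]{4,}", target):          # looks like a phone number
        return "tel:" + re.sub(r"[^0-9+]", "", target)
    if not re.match(r"^[a-z]+://", target):                 # bare domain or path
        return f"https://{target}"
    return target                                           # already has a scheme


assert auto_prefix_link("[email protected]") == "mailto:[email protected]"
assert auto_prefix_link("+49 30 1234567") == "tel:+49301234567"
assert auto_prefix_link("example.org") == "https://example.org"
assert auto_prefix_link("https://example.org") == "https://example.org"
```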
{"golden_diff": "diff --git a/backend/cms/models/page.py b/backend/cms/models/page.py\n--- a/backend/cms/models/page.py\n+++ b/backend/cms/models/page.py\n@@ -11,7 +11,7 @@\n \n from mptt.models import MPTTModel, TreeForeignKey\n \n-from .language import Language, LanguageTreeNode\n+from .language import Language\n from .region import Region\n from ..constants import status\n \n@@ -143,7 +143,7 @@\n def best_language_title(self):\n page_translation = self.page_translations.filter(language__code=get_language())\n if not page_translation:\n- alt_code = LanguageTreeNode.objects.get(region__id=self.region.id).get_root().language.code\n+ alt_code = self.region.default_language.code\n page_translation = self.page_translations.filter(language__code=alt_code)\n return page_translation.first().title\n", "issue": "Add additional linking / features to TinyMCE\n- [ ] Add functionality to link button: when an email-address is marked, the link should be generated automatically with mailto:, when telephone number it shoudl add tel:, adding a website should add https://\r\n\r\n- [ ] Add functionality to insert internal links (choose from a list or 'type to search'\r\n\r\n- [x] CSS class to add background color to notranslate tags\r\n\r\n- [x] Source Code view available in two option menus\r\n\r\n- [x] Menus allow access to forbidden functions (align left/right/justify)\r\n\r\n- [ ] Dropdown should only contain headings\n", "before_files": [{"content": "\"\"\"Models representing a page and page translation with content\n\"\"\"\n\nimport logging\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import get_language\n\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom .language import Language, LanguageTreeNode\nfrom .region import Region\nfrom ..constants import status\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Page(MPTTModel):\n \"\"\"Class that represents an Page database object\n\n Args:\n MPTTModel : Library for hierachical data structures\n \"\"\"\n\n parent = TreeForeignKey(\n 'self',\n blank=True,\n null=True,\n related_name='children',\n on_delete=models.PROTECT\n )\n icon = models.ImageField(\n blank=True,\n null=True,\n upload_to='pages/%Y/%m/%d'\n )\n region = models.ForeignKey(Region, related_name='pages', on_delete=models.CASCADE)\n archived = models.BooleanField(default=False)\n mirrored_page = models.ForeignKey('self', null=True, blank=True, on_delete=models.PROTECT)\n mirrored_page_first = models.BooleanField(default=True)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='editors', blank=True)\n publishers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='publishers', blank=True)\n created_date = models.DateTimeField(default=timezone.now)\n last_updated = models.DateTimeField(auto_now=True)\n\n @property\n def depth(self):\n \"\"\"Provide level of inheritance\n\n Returns:\n Int : Number of ancestors\n \"\"\"\n\n return len(self.get_ancestors())\n\n @property\n def languages(self):\n page_translations = self.page_translations.prefetch_related('language').all()\n languages = []\n for page_translation in page_translations:\n if page_translation.language not in languages:\n languages.append(page_translation.language)\n return languages\n\n def get_translation(self, language_code):\n return self.page_translations.filter(\n language__code=language_code\n ).first()\n\n # Helper function for page labels, second level paths etc. 
where the ancestor translation might not exist\n def get_first_translation(self, priority_language_codes=None):\n # Taking [] directly as default parameter would be dangerous because it is mutable\n if not priority_language_codes:\n priority_language_codes = []\n for language_code in priority_language_codes + ['en-us', 'de-de']:\n if self.page_translations.filter(language__code=language_code).exists():\n return self.page_translations.filter(language__code=language_code).first()\n return self.page_translations.first()\n\n def get_public_translation(self, language_code):\n return self.page_translations.filter(\n language__code=language_code,\n status=status.PUBLIC,\n ).first()\n\n def get_mirrored_text(self, language_code):\n \"\"\"\n This content needs to be added when delivering content to end users\n \"\"\"\n return self.mirrored_page.get_translation(language_code).text\n\n def get_absolute_url(self):\n return reverse('edit_page', kwargs={\n 'page_id': self.id,\n 'region_slug': self.region.slug,\n 'language_code': self.region.default_language.code,\n })\n\n @staticmethod\n def get_archived(region_slug):\n return Page.objects.filter(archived=True, region__slug=region_slug)\n\n @staticmethod\n def archived_count(region_slug):\n return Page.objects.filter(archived=True, region__slug=region_slug).count()\n\n def __str__(self):\n first_translation = self.get_first_translation()\n return '(id: {}, slug: {} ({}))'.format(self.id, first_translation.slug, first_translation.language.code)\n\n @classmethod\n def get_tree(cls, region_slug, archived=False):\n \"\"\"Function for building up a Treeview of all pages\n\n Args:\n region_slug: slug of the region the page belongs to\n archived: if true archived pages will be included\n\n Returns:\n [pages]: Array of pages connected with their relations\n \"\"\"\n\n if archived:\n pages = cls.objects.all().prefetch_related(\n 'page_translations'\n ).filter(\n region__slug=region_slug\n )\n else:\n pages = cls.objects.all().prefetch_related(\n 'page_translations'\n ).filter(\n region__slug=region_slug,\n archived=False\n )\n\n return pages\n\n def best_language_title(self):\n page_translation = self.page_translations.filter(language__code=get_language())\n if not page_translation:\n alt_code = LanguageTreeNode.objects.get(region__id=self.region.id).get_root().language.code\n page_translation = self.page_translations.filter(language__code=alt_code)\n return page_translation.first().title\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('view_pages', 'Can view pages'),\n ('edit_pages', 'Can edit pages'),\n ('publish_pages', 'Can publish pages'),\n ('grant_page_permissions', 'Can grant page permissions'),\n )\n\n\nclass PageTranslation(models.Model):\n \"\"\"Class defining a Translation of a Page\n\n Args:\n models : Class inherit of django-Models\n \"\"\"\n\n page = models.ForeignKey(Page, related_name='page_translations', on_delete=models.CASCADE)\n language = models.ForeignKey(\n Language,\n related_name='page_translations',\n on_delete=models.CASCADE\n )\n slug = models.SlugField(max_length=200, blank=True, allow_unicode=True)\n title = models.CharField(max_length=250)\n text = models.TextField()\n status = models.CharField(max_length=6, choices=status.CHOICES, default=status.DRAFT)\n currently_in_translation = models.BooleanField(default=False)\n version = models.PositiveIntegerField(default=0)\n minor_edit = models.BooleanField(default=False)\n creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)\n 
created_date = models.DateTimeField(default=timezone.now)\n last_updated = models.DateTimeField(auto_now=True)\n\n @property\n def ancestor_path(self):\n return '/'.join([\n ancestor.get_first_translation([self.language.code]).slug\n for ancestor in self.page.get_ancestors()\n ])\n\n @property\n def permalink(self):\n return '{}/{}/{}/{}'.format(\n self.page.region.slug, self.language.code, self.ancestor_path, self.slug\n )\n\n @property\n def available_languages(self):\n languages = self.page.languages\n languages.remove(self.language)\n available_languages = {}\n for language in languages:\n other_translation = self.page.get_public_translation(language.code)\n if other_translation:\n available_languages[language.code] = {\n 'id': other_translation.id,\n 'url': other_translation.permalink\n }\n return available_languages\n\n @property\n def source_translation(self):\n source_language_tree_node = self.page.region.language_tree_nodes.get(language=self.language).parent\n if source_language_tree_node:\n return self.page.get_translation(source_language_tree_node.code)\n return None\n\n @property\n def latest_public_revision(self):\n return self.page.page_translations.filter(\n language=self.language,\n status=status.PUBLIC,\n ).first()\n\n @property\n def latest_major_revision(self):\n return self.page.page_translations.filter(\n language=self.language,\n minor_edit=False,\n ).first()\n\n @property\n def latest_major_public_revision(self):\n return self.page.page_translations.filter(\n language=self.language,\n status=status.PUBLIC,\n minor_edit=False,\n ).first()\n\n @property\n def previous_revision(self):\n version = self.version - 1\n return self.page.page_translations.filter(\n language=self.language,\n version=version,\n ).first()\n\n @property\n def is_outdated(self):\n source_translation = self.source_translation\n # If self.language is the root language, this translation can never be outdated\n if not source_translation:\n return False\n # If the source translation is outdated, this translation can not be up to date\n if source_translation.is_outdated:\n return True\n self_revision = self.latest_major_public_revision\n source_revision = source_translation.latest_major_public_revision\n # If on of the translations has no major public revision, it cannot be outdated\n if not self_revision or not source_revision:\n return False\n return self_revision.last_updated < source_revision.last_updated\n\n @property\n def combined_text(self):\n \"\"\"\n Combines the text from the PageTranslation with the text from the mirrored page.\n \"\"\"\n if self.page.mirrored_page_first:\n return self.page.get_mirrored_text(self.language.code) + self.text\n return self.text + self.page.get_mirrored_text(self.language.code)\n\n def __str__(self):\n return '(id: {}, lang: {}, slug: {})'.format(self.id, self.language.code, self.slug)\n\n class Meta:\n ordering = ['page', '-version']\n default_permissions = ()\n", "path": "backend/cms/models/page.py"}]}
num_tokens_prompt: 3,331
num_tokens_diff: 186

problem_id: gh_patches_debug_21022
source: rasdani/github-patches
task_type: git_diff
in_source_id: bookwyrm-social__bookwyrm-1983
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> localisation string for a shelf is missing **Describe the bug** A translation exists, but the string "Currently reading" is shown in English language **To Reproduce** Switch language to non-english and check book status **Expected behavior** Translated string used instead of English **Screenshots** ![image](https://user-images.githubusercontent.com/81133/151704062-a99581a4-a941-4c6c-815a-62c7fd4885f6.png) **Instance** https://ziurkes.group.lt </issue> <code> [start of bookwyrm/templatetags/shelf_tags.py] 1 """ Filters and tags related to shelving books """ 2 from django import template 3 4 from bookwyrm import models 5 from bookwyrm.utils import cache 6 7 8 register = template.Library() 9 10 11 @register.filter(name="is_book_on_shelf") 12 def get_is_book_on_shelf(book, shelf): 13 """is a book on a shelf""" 14 return cache.get_or_set( 15 f"book-on-shelf-{book.id}-{shelf.id}", 16 lambda b, s: s.books.filter(id=b.id).exists(), 17 book, 18 shelf, 19 timeout=15552000, 20 ) 21 22 23 @register.filter(name="next_shelf") 24 def get_next_shelf(current_shelf): 25 """shelf you'd use to update reading progress""" 26 if current_shelf == "to-read": 27 return "reading" 28 if current_shelf == "reading": 29 return "read" 30 if current_shelf == "read": 31 return "complete" 32 return "to-read" 33 34 35 @register.simple_tag(takes_context=True) 36 def active_shelf(context, book): 37 """check what shelf a user has a book on, if any""" 38 user = context["request"].user 39 return cache.get_or_set( 40 f"active_shelf-{user.id}-{book.id}", 41 lambda u, b: ( 42 models.ShelfBook.objects.filter( 43 shelf__user=u, 44 book__parent_work__editions=b, 45 ).first() 46 or False 47 ), 48 user, 49 book, 50 timeout=15552000, 51 ) or {"book": book} 52 53 54 @register.simple_tag(takes_context=False) 55 def latest_read_through(book, user): 56 """the most recent read activity""" 57 return cache.get_or_set( 58 f"latest_read_through-{user.id}-{book.id}", 59 lambda u, b: ( 60 models.ReadThrough.objects.filter(user=u, book=b, is_active=True) 61 .order_by("-start_date") 62 .first() 63 or False 64 ), 65 user, 66 book, 67 timeout=15552000, 68 ) 69 [end of bookwyrm/templatetags/shelf_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py --- a/bookwyrm/templatetags/shelf_tags.py +++ b/bookwyrm/templatetags/shelf_tags.py @@ -1,5 +1,6 @@ """ Filters and tags related to shelving books """ from django import template +from django.utils.translation import gettext_lazy as _ from bookwyrm import models from bookwyrm.utils import cache @@ -32,6 +33,24 @@ return "to-read" [email protected](name="translate_shelf_name") +def get_translated_shelf_name(shelf): + """produced translated shelf nidentifierame""" + if not shelf: + return "" + # support obj or dict + identifier = shelf["identifier"] if isinstance(shelf, dict) else shelf.identifier + if identifier == "all": + return _("All books") + if identifier == "to-read": + return _("To Read") + if identifier == "reading": + return _("Currently Reading") + if identifier == "read": + return _("Read") + return shelf["name"] if isinstance(shelf, dict) else shelf.name + + @register.simple_tag(takes_context=True) def active_shelf(context, book): """check what shelf a user has a book on, if any"""
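The fix works because only strings that pass through `gettext`/`gettext_lazy` are looked up in the active locale's catalog; the shelf's stored English `name` never is, which is why "Currently reading" stayed English. A minimal standalone demonstration of that distinction (assumes Django is installed; settings are configured inline so it runs outside a project, and the lookup simply falls back to the English msgid when no .po entry exists):

```python
import django
from django.conf import settings

settings.configure(USE_I18N=True)  # minimal settings so the translation machinery can start
django.setup()

from django.utils import translation
from django.utils.translation import gettext_lazy as _

shelf_labels = {
    "reading": _("Currently Reading"),  # translatable: resolved against the active locale
    "read": "Read",                     # hard-coded: rendered identically in every locale
}

with translation.override("lt"):         # Lithuanian, as on the reporting instance
    print(str(shelf_labels["reading"]))  # looked up in the "lt" catalog (msgid if untranslated)
    print(shelf_labels["read"])          # never consults the catalog at all
```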
{"golden_diff": "diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py\n--- a/bookwyrm/templatetags/shelf_tags.py\n+++ b/bookwyrm/templatetags/shelf_tags.py\n@@ -1,5 +1,6 @@\n \"\"\" Filters and tags related to shelving books \"\"\"\n from django import template\n+from django.utils.translation import gettext_lazy as _\n \n from bookwyrm import models\n from bookwyrm.utils import cache\n@@ -32,6 +33,24 @@\n return \"to-read\"\n \n \[email protected](name=\"translate_shelf_name\")\n+def get_translated_shelf_name(shelf):\n+ \"\"\"produced translated shelf nidentifierame\"\"\"\n+ if not shelf:\n+ return \"\"\n+ # support obj or dict\n+ identifier = shelf[\"identifier\"] if isinstance(shelf, dict) else shelf.identifier\n+ if identifier == \"all\":\n+ return _(\"All books\")\n+ if identifier == \"to-read\":\n+ return _(\"To Read\")\n+ if identifier == \"reading\":\n+ return _(\"Currently Reading\")\n+ if identifier == \"read\":\n+ return _(\"Read\")\n+ return shelf[\"name\"] if isinstance(shelf, dict) else shelf.name\n+\n+\n @register.simple_tag(takes_context=True)\n def active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n", "issue": "localisation string for a shelf is missing\n**Describe the bug**\r\nA translation exists, but the string \"Currently reading\"\u00a0is shown in English language\r\n\r\n**To Reproduce**\r\nSwitch language to non-english and check book status\r\n\r\n**Expected behavior**\r\nTranslated string used instead of English\r\n\r\n**Screenshots**\r\n\r\n![image](https://user-images.githubusercontent.com/81133/151704062-a99581a4-a941-4c6c-815a-62c7fd4885f6.png)\r\n\r\n**Instance**\r\nhttps://ziurkes.group.lt\r\n\r\n\n", "before_files": [{"content": "\"\"\" Filters and tags related to shelving books \"\"\"\nfrom django import template\n\nfrom bookwyrm import models\nfrom bookwyrm.utils import cache\n\n\nregister = template.Library()\n\n\[email protected](name=\"is_book_on_shelf\")\ndef get_is_book_on_shelf(book, shelf):\n \"\"\"is a book on a shelf\"\"\"\n return cache.get_or_set(\n f\"book-on-shelf-{book.id}-{shelf.id}\",\n lambda b, s: s.books.filter(id=b.id).exists(),\n book,\n shelf,\n timeout=15552000,\n )\n\n\[email protected](name=\"next_shelf\")\ndef get_next_shelf(current_shelf):\n \"\"\"shelf you'd use to update reading progress\"\"\"\n if current_shelf == \"to-read\":\n return \"reading\"\n if current_shelf == \"reading\":\n return \"read\"\n if current_shelf == \"read\":\n return \"complete\"\n return \"to-read\"\n\n\[email protected]_tag(takes_context=True)\ndef active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n user = context[\"request\"].user\n return cache.get_or_set(\n f\"active_shelf-{user.id}-{book.id}\",\n lambda u, b: (\n models.ShelfBook.objects.filter(\n shelf__user=u,\n book__parent_work__editions=b,\n ).first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n ) or {\"book\": book}\n\n\[email protected]_tag(takes_context=False)\ndef latest_read_through(book, user):\n \"\"\"the most recent read activity\"\"\"\n return cache.get_or_set(\n f\"latest_read_through-{user.id}-{book.id}\",\n lambda u, b: (\n models.ReadThrough.objects.filter(user=u, book=b, is_active=True)\n .order_by(\"-start_date\")\n .first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n )\n", "path": "bookwyrm/templatetags/shelf_tags.py"}]}
1,261
313
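Note on the record above: the golden diff fixes the missing localisation by registering a translate_shelf_name template filter built on gettext_lazy, so the shelf label goes through Django's translation machinery instead of being shown as raw English. Below is a minimal self-contained sketch of the same filter, rewritten as a lookup table for brevity; this is illustrative only, the committed version in bookwyrm/templatetags/shelf_tags.py uses the explicit if-chain shown in the diff.

from django import template
from django.utils.translation import gettext_lazy as _

register = template.Library()

# Identifier -> translatable label; unknown identifiers fall back to the stored name.
SHELF_NAMES = {
    "all": _("All books"),
    "to-read": _("To Read"),
    "reading": _("Currently Reading"),
    "read": _("Read"),
}

@register.filter(name="translate_shelf_name")
def get_translated_shelf_name(shelf):
    """Return a translated label for a shelf passed as a model object or a dict."""
    if not shelf:
        return ""
    is_dict = isinstance(shelf, dict)
    identifier = shelf["identifier"] if is_dict else shelf.identifier
    fallback = shelf["name"] if is_dict else shelf.name
    return SHELF_NAMES.get(identifier, fallback)

In a template the filter is applied as {{ shelf|translate_shelf_name }} after {% load shelf_tags %}; the dict-or-object branch mirrors the diff's handling of both serialized and ORM shelf values.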
gh_patches_debug_14195
rasdani/github-patches
git_diff
e-valuation__EvaP-1076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add information message on deactivated course page for editors Editors can open a course's edit page after they approved the course, however the form will then be disabled. A message on top of the page should explain this to the user. </issue> <code> [start of evap/contributor/views.py] 1 from django.contrib import messages 2 from django.core.exceptions import PermissionDenied, SuspiciousOperation 3 from django.forms.models import inlineformset_factory 4 from django.shortcuts import get_object_or_404, redirect, render 5 from django.utils.translation import ugettext as _ 6 from django.db import IntegrityError, transaction 7 8 from evap.contributor.forms import CourseForm, DelegatesForm, EditorContributionForm 9 from evap.evaluation.auth import contributor_or_delegate_required, editor_or_delegate_required, editor_required 10 from evap.evaluation.models import Contribution, Course, Semester 11 from evap.evaluation.tools import STATES_ORDERED, sort_formset 12 from evap.results.tools import calculate_average_grades_and_deviation 13 from evap.staff.forms import ContributionFormSet 14 from evap.student.views import vote_preview 15 16 17 @contributor_or_delegate_required 18 def index(request): 19 user = request.user 20 21 contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published'] 22 own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states) 23 24 represented_users = user.represented_users.all() 25 delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states) 26 27 all_courses = list(own_courses) + list(delegated_courses) 28 all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state)) 29 30 for course in all_courses: 31 if course.state == 'published': 32 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 33 34 semesters = Semester.objects.all() 35 semester_list = [dict( 36 semester_name=semester.name, 37 id=semester.id, 38 is_active_semester=semester.is_active_semester, 39 courses=[course for course in all_courses if course.semester_id == semester.id] 40 ) for semester in semesters] 41 42 template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses) 43 return render(request, "contributor_index.html", template_data) 44 45 46 @editor_required 47 def settings_edit(request): 48 user = request.user 49 form = DelegatesForm(request.POST or None, request.FILES or None, instance=user) 50 51 if form.is_valid(): 52 form.save() 53 54 messages.success(request, _("Successfully updated your settings.")) 55 return redirect('contributor:settings_edit') 56 else: 57 return render(request, "contributor_settings.html", dict( 58 form=form, 59 delegate_of=user.represented_users.all(), 60 cc_users=user.cc_users.all(), 61 ccing_users=user.ccing_users.all(), 62 )) 63 64 65 @editor_or_delegate_required 66 def course_view(request, course_id): 67 user = request.user 68 course = get_object_or_404(Course, id=course_id) 69 70 # check rights 71 if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']): 72 raise PermissionDenied 73 74 InlineContributionFormset = inlineformset_factory(Course, Contribution, 
formset=ContributionFormSet, form=EditorContributionForm, extra=0) 75 76 form = CourseForm(request.POST or None, instance=course) 77 formset = InlineContributionFormset(request.POST or None, instance=course) 78 79 # make everything read-only 80 for cform in formset.forms + [form]: 81 for field in cform.fields.values(): 82 field.disabled = True 83 84 template_data = dict(form=form, formset=formset, course=course, editable=False, 85 responsibles=[contributor.username for contributor in course.responsible_contributors]) 86 return render(request, "contributor_course_form.html", template_data) 87 88 89 def render_preview(request, formset, course_form, course): 90 # open transaction to not let any other requests see anything of what we're doing here 91 try: 92 with transaction.atomic(): 93 course_form.save(user=request.user) 94 formset.save() 95 request.POST = None # this prevents errors rendered in the vote form 96 97 preview_response = vote_preview(request, course, for_rendering_in_modal=True).content 98 raise IntegrityError # rollback transaction to discard the database writes 99 except IntegrityError: 100 pass 101 102 return preview_response 103 104 105 @editor_or_delegate_required 106 def course_edit(request, course_id): 107 user = request.user 108 course = get_object_or_404(Course, id=course_id) 109 110 # check rights 111 if not (course.is_user_editor_or_delegate(user) and course.state == 'prepared'): 112 raise PermissionDenied 113 114 post_operation = request.POST.get('operation') if request.POST else None 115 preview = post_operation == 'preview' 116 117 InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1) 118 course_form = CourseForm(request.POST or None, instance=course) 119 formset = InlineContributionFormset(request.POST or None, instance=course, can_change_responsible=False, form_kwargs={'course': course}) 120 121 forms_are_valid = course_form.is_valid() and formset.is_valid() 122 123 if forms_are_valid and not preview: 124 if post_operation not in ('save', 'approve'): 125 raise SuspiciousOperation("Invalid POST operation") 126 127 course_form.save(user=user) 128 formset.save() 129 130 if post_operation == 'approve': 131 course.editor_approve() 132 course.save() 133 messages.success(request, _("Successfully updated and approved course.")) 134 else: 135 messages.success(request, _("Successfully updated course.")) 136 137 return redirect('contributor:index') 138 else: 139 preview_html = None 140 if preview and forms_are_valid: 141 preview_html = render_preview(request, formset, course_form, course) 142 143 if not forms_are_valid and (course_form.errors or formset.errors): 144 if preview: 145 messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below.")) 146 else: 147 messages.error(request, _("The form was not saved. 
Please resolve the errors shown below.")) 148 149 sort_formset(request, formset) 150 template_data = dict(form=course_form, formset=formset, course=course, editable=True, preview_html=preview_html, 151 responsibles=[contributor.username for contributor in course.responsible_contributors]) 152 return render(request, "contributor_course_form.html", template_data) 153 154 155 @contributor_or_delegate_required 156 def course_preview(request, course_id): 157 user = request.user 158 course = get_object_or_404(Course, id=course_id) 159 160 # check rights 161 if not (course.is_user_contributor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']): 162 raise PermissionDenied 163 164 return vote_preview(request, course) 165 [end of evap/contributor/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/contributor/views.py b/evap/contributor/views.py --- a/evap/contributor/views.py +++ b/evap/contributor/views.py @@ -71,6 +71,9 @@ if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']): raise PermissionDenied + if course.is_user_editor_or_delegate(user): + messages.info(request, _('You cannot edit this course because it has already been approved.')) + InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0) form = CourseForm(request.POST or None, instance=course)
{"golden_diff": "diff --git a/evap/contributor/views.py b/evap/contributor/views.py\n--- a/evap/contributor/views.py\n+++ b/evap/contributor/views.py\n@@ -71,6 +71,9 @@\n if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n \n+ if course.is_user_editor_or_delegate(user):\n+ messages.info(request, _('You cannot edit this course because it has already been approved.'))\n+\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n \n form = CourseForm(request.POST or None, instance=course)\n", "issue": "Add information message on deactivated course page for editors\nEditors can open a course's edit page after they approved the course, however the form will then be disabled. A message on top of the page should explain this to the user.\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.db import IntegrityError, transaction\n\nfrom evap.contributor.forms import CourseForm, DelegatesForm, EditorContributionForm\nfrom evap.evaluation.auth import contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, Semester\nfrom evap.evaluation.tools import STATES_ORDERED, sort_formset\nfrom evap.results.tools import calculate_average_grades_and_deviation\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import vote_preview\n\n\n@contributor_or_delegate_required\ndef index(request):\n user = request.user\n\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states)\n\n represented_users = user.represented_users.all()\n delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states)\n\n all_courses = list(own_courses) + list(delegated_courses)\n all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state))\n\n for course in all_courses:\n if course.state == 'published':\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n semesters = Semester.objects.all()\n semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active_semester=semester.is_active_semester,\n courses=[course for course in all_courses if course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses)\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n else:\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n 
delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef course_view(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = True\n\n template_data = dict(form=form, formset=formset, course=course, editable=False,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\ndef render_preview(request, formset, course_form, course):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n course_form.save(user=request.user)\n formset.save()\n request.POST = None # this prevents errors rendered in the vote form\n\n preview_response = vote_preview(request, course, for_rendering_in_modal=True).content\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef course_edit(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_editor_or_delegate(user) and course.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n course_form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course, can_change_responsible=False, form_kwargs={'course': course})\n\n forms_are_valid = course_form.is_valid() and formset.is_valid()\n\n if forms_are_valid and not preview:\n if post_operation not in ('save', 'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n course_form.save(user=user)\n formset.save()\n\n if post_operation == 'approve':\n course.editor_approve()\n course.save()\n messages.success(request, _(\"Successfully updated and approved course.\"))\n else:\n messages.success(request, _(\"Successfully updated course.\"))\n\n return redirect('contributor:index')\n else:\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, course_form, course)\n\n if not forms_are_valid and (course_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. 
Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=course_form, formset=formset, course=course, editable=True, preview_html=preview_html,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\n@contributor_or_delegate_required\ndef course_preview(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_contributor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return vote_preview(request, course)\n", "path": "evap/contributor/views.py"}]}
2,483
181
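Note on the record above: the three lines added by the golden diff are the entire fix. When an editor or delegate opens course_view for an already-approved course, the existing loop has already disabled every form field, and the new messages.info call explains at the top of the page why the form is not editable. Below is a condensed sketch of the combined pattern; the field-disabling loop is taken from the before_files listing and the message text from the diff, while the helper function and its name are hypothetical, since the repository performs both steps inline in evap/contributor/views.py.

from django.contrib import messages
from django.utils.translation import ugettext as _

def make_course_read_only(request, form, formset):
    """Disable every field and tell the user why the page cannot be edited."""
    for cform in list(formset.forms) + [form]:
        for field in cform.fields.values():
            field.disabled = True
    messages.info(
        request,
        _('You cannot edit this course because it has already been approved.'))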
gh_patches_debug_1462
rasdani/github-patches
git_diff
liqd__a4-opin-400
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fix function in api.js to use contenttype json and fix all react components </issue> <code> [start of euth_wagtail/settings/base.py] 1 """ 2 Django settings for euth_wagtail project. 3 4 Generated by 'django-admin startproject' using Django 1.9.1. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.9/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.9/ref/settings/ 11 """ 12 13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 14 import os 15 16 from django.utils.translation import ugettext_lazy as _ 17 18 PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 BASE_DIR = os.path.dirname(PROJECT_DIR) 20 21 22 # Quick-start development settings - unsuitable for production 23 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ 24 25 26 # Application definition 27 28 INSTALLED_APPS = [ 29 'home', 30 31 'wagtail.wagtailforms', 32 'wagtail.wagtailredirects', 33 'wagtail.wagtailembeds', 34 'wagtail.wagtailsites', 35 'wagtail.wagtailusers', 36 'wagtail.wagtailsnippets', 37 'wagtail.wagtaildocs', 38 'wagtail.wagtailimages', 39 'wagtail.wagtailsearch', 40 'wagtail.wagtailadmin', 41 'wagtail.wagtailcore', 42 'wagtail.contrib.wagtailstyleguide', 43 44 'modelcluster', 45 'compressor', 46 'taggit', 47 'widget_tweaks', 48 'webpack_loader', 49 'easy_thumbnails', 50 'parler', 51 'ckeditor', 52 'ckeditor_uploader', 53 54 'django.contrib.sites', 55 'django.contrib.admin', 56 'django.contrib.auth', 57 'django.contrib.contenttypes', 58 'django.contrib.sessions', 59 'django.contrib.messages', 60 'django.contrib.staticfiles', 61 'django_countries', 62 'rest_framework', 63 'autofixture', 64 'rules.apps.AutodiscoverRulesConfig', 65 'allauth', 66 'allauth.account', 67 'allauth.socialaccount', 68 69 'euth.users.apps.UsersConfig', 70 'euth.organisations.apps.OrganisationsConfig', 71 'euth.projects.apps.ProjectsConfig', 72 'euth.comments.apps.CommentConfig', 73 'euth.phases.apps.PhasesConfig', 74 'euth.modules.apps.ModuleConfig', 75 'euth.ideas.apps.IdeaConfig', 76 'euth.ratings.apps.RatingsConfig', 77 'euth.reports.apps.ReportConfig', 78 'euth.dashboard.apps.DashboardConfig', 79 'euth.memberships.apps.MembershipsConfig', 80 'euth.documents.apps.DocumentConfig', 81 'euth.flashpoll.apps.FlashpollConfig', 82 'euth.contrib', 83 ] 84 85 MIDDLEWARE_CLASSES = [ 86 'django.contrib.sessions.middleware.SessionMiddleware', 87 'django.middleware.common.CommonMiddleware', 88 'django.middleware.csrf.CsrfViewMiddleware', 89 'django.contrib.auth.middleware.AuthenticationMiddleware', 90 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 91 'django.contrib.messages.middleware.MessageMiddleware', 92 'django.middleware.clickjacking.XFrameOptionsMiddleware', 93 'django.middleware.security.SecurityMiddleware', 94 'django.middleware.locale.LocaleMiddleware', 95 'wagtail.wagtailcore.middleware.SiteMiddleware', 96 'wagtail.wagtailredirects.middleware.RedirectMiddleware', 97 ] 98 99 SITE_ID = 1 100 101 ROOT_URLCONF = 'euth_wagtail.urls' 102 103 LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')] 104 105 TEMPLATES = [ 106 { 107 'BACKEND': 'django.template.backends.django.DjangoTemplates', 108 'DIRS': [ 109 os.path.join(PROJECT_DIR, 'templates'), 110 ], 111 'APP_DIRS': True, 112 'OPTIONS': { 113 'context_processors': [ 114 'django.template.context_processors.debug', 115 
'django.template.context_processors.request', 116 'django.contrib.auth.context_processors.auth', 117 'django.contrib.messages.context_processors.messages', 118 ], 119 }, 120 }, 121 ] 122 123 WSGI_APPLICATION = 'euth_wagtail.wsgi.application' 124 125 126 # Database 127 # https://docs.djangoproject.com/en/1.9/ref/settings/#databases 128 129 DATABASES = { 130 'default': { 131 'ENGINE': 'django.db.backends.sqlite3', 132 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 133 'TEST': { 134 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'), 135 } 136 } 137 } 138 139 140 # Auth 141 # https://docs.djangoproject.com/en/1.8/topics/auth/customizing/ 142 143 AUTH_USER_MODEL = 'euth_users.User' 144 145 AUTHENTICATION_BACKENDS = ( 146 'rules.permissions.ObjectPermissionBackend', 147 'django.contrib.auth.backends.ModelBackend', 148 'allauth.account.auth_backends.AuthenticationBackend', 149 ) 150 151 CKEDITOR_UPLOAD_PATH = "uploads/" 152 CKEDITOR_ALLOW_NONIMAGE_FILES = False 153 154 CKEDITOR_CONFIGS = { 155 'default': { 156 'width': '100%', 157 'toolbar': 'Custom', 158 'toolbar_Custom': [ 159 ['Bold', 'Italic', 'Underline'], 160 ['NumberedList', 'BulletedList'], 161 ['Link', 'Unlink'] 162 ] 163 }, 164 'image-editor': { 165 'width': '100%', 166 'toolbar': 'Custom', 167 'toolbar_Custom': [ 168 ['Bold', 'Italic', 'Underline'], 169 ['Image'], 170 ['NumberedList', 'BulletedList'], 171 ['Link', 'Unlink'] 172 ] 173 } 174 } 175 176 BLEACH_LIST = { 177 'default' : { 178 'tags': ['p','strong','em','u','ol','li','ul','a'], 179 'attributes': { 180 'a': ['href', 'rel'], 181 }, 182 }, 183 'image-editor': { 184 'tags': ['p','strong','em','u','ol','li','ul','a','img'], 185 'attributes': { 186 'a': ['href', 'rel'], 187 'img': ['src', 'alt', 'style'] 188 }, 189 'styles': [ 190 'float', 191 'margin', 192 'padding', 193 'width', 194 'height', 195 'margin-bottom', 196 'margin-top', 197 'margin-left', 198 'margin-right', 199 ], 200 } 201 } 202 203 # Internationalization 204 # https://docs.djangoproject.com/en/1.9/topics/i18n/ 205 206 LANGUAGE_CODE = 'en' 207 208 TIME_ZONE = 'UTC' 209 210 USE_I18N = True 211 212 USE_L10N = True 213 214 USE_TZ = True 215 216 LANGUAGES = [ 217 ('en', _('English')), 218 ('de', _('German')), 219 ('it', _('Italien')), 220 ('fr', _('French')), 221 ('sv', _('Swedish')), 222 ('sl', _('Slovene')), 223 ('da', _('Danish')), 224 ] 225 226 PARLER_LANGUAGES = { 227 1:[{'code': language_code } for language_code, language in LANGUAGES] 228 } 229 230 # fixtures 231 232 FIXTURE_DIRS = [ os.path.join(PROJECT_DIR, 'fixtures') ] 233 234 ALLOWED_UPLOAD_IMAGES = ('image/png', 'image/jpeg', 'image/gif') 235 236 THUMBNAIL_ALIASES = { 237 '': { 238 'heroimage': {'size': (1500, 500), 'crop': 'smart'}, 239 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'}, 240 'idea_image': {'size': (800, 0), 'crop': 'scale'}, 241 'organisation_thumbnail': {'size': (740, 540), 'crop': 'smart'}, 242 'avatar_small': {'size': (60, 60), 'crop': 'smart'}, 243 'org_avatar_small': {'size': (60, 60), 'crop': 'scale'}, 244 'org_avatar_medium': {'size': (200, 200), 'crop': 'scale'}, 245 } 246 } 247 # Static files (CSS, JavaScript, Images) 248 # https://docs.djangoproject.com/en/1.9/howto/static-files/ 249 250 STATICFILES_FINDERS = [ 251 'django.contrib.staticfiles.finders.FileSystemFinder', 252 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 253 'compressor.finders.CompressorFinder', 254 ] 255 256 257 WEBPACK_LOADER = { 258 'DEFAULT': { 259 'CACHE': False, 260 'BUNDLE_DIR_NAME': 'bundles/', # must end with slash 261 'STATS_FILE': 
os.path.join(BASE_DIR, 'webpack-stats.json'), 262 'POLL_INTERVAL': 0.1, 263 'IGNORE': ['.+\.hot-update.js', '.+\.map'] 264 } 265 } 266 267 268 STATICFILES_DIRS = [ 269 os.path.join(BASE_DIR, 'node_modules/jquery/dist'), 270 os.path.join(BASE_DIR, 'node_modules/salvattore/dist'), 271 os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/javascripts'), 272 os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/stylesheets'), 273 os.path.join(BASE_DIR, 'node_modules/font-awesome'), 274 os.path.join(BASE_DIR, 'node_modules/owl.carousel/dist'), 275 os.path.join(BASE_DIR, 'node_modules/flatpickr/assets'), 276 os.path.join(BASE_DIR, 'node_modules/flatpickr/dist'), 277 os.path.join(PROJECT_DIR, 'static'), 278 ] 279 280 STATIC_ROOT = os.path.join(BASE_DIR, 'static') 281 STATIC_URL = '/static/' 282 283 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 284 MEDIA_URL = '/media/' 285 286 COMPRESS_PRECOMPILERS = ( 287 ('text/x-scss', 'django_libsass.SassCompiler'), 288 ) 289 LIBSASS_SOURCEMAPS = True 290 291 EMAIL_SUBJECT_PREFIX = '[OPIN] ' 292 293 # Wagtail settings 294 295 WAGTAIL_SITE_NAME = "euth_wagtail" 296 297 # Authentification 298 299 LOGIN_URL = 'account_login' 300 LOGOUT_URL = 'account_logout' 301 LOGIN_REDIRECT_URL = '/' 302 303 ACCOUNT_ADAPTER = 'euth.users.adapters.EuthAccountAdapter' 304 ACCOUNT_AUTHENTICATION_METHOD = 'email' 305 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3 306 ACCOUNT_EMAIL_REQUIRED = True 307 ACCOUNT_EMAIL_SUBJECT_PREFIX = EMAIL_SUBJECT_PREFIX 308 ACCOUNT_EMAIL_VERIFICATION = 'mandatory' 309 ACCOUNT_SIGNUP_FORM_CLASS = 'euth.users.forms.SignUpForm' 310 ACCOUNT_USER_DISPLAY = 'euth.users.services.account_user_display' 311 ACCOUNT_USER_MODEL_USERNAME_FIELD = 'username' 312 ACCOUNT_USERNAME_REQUIRED = True 313 ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10 314 ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds 315 ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True 316 ACCOUNT_LOGIN_ON_PASSWORD_RESET = True 317 ACCOUNT_LOGOUT_REDIRECT_URL = '/' 318 SOCIALACCOUNT_EMAIL_VERIFICATION = False 319 320 # Euth settings 321 322 COMMENTABLES = ( 323 ('euth_ideas', 'idea'), 324 ('euth_documents', 'paragraph'), 325 ('euth_documents', 'document'), 326 ('euth_comments', 'comment'), 327 ) 328 329 RATEABLES = COMMENTABLES 330 331 REPORTABLES = COMMENTABLES 332 333 FLASHPOLL_URL = "https://opin.flashpoll.eu/" 334 [end of euth_wagtail/settings/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth_wagtail/settings/base.py b/euth_wagtail/settings/base.py --- a/euth_wagtail/settings/base.py +++ b/euth_wagtail/settings/base.py @@ -317,6 +317,14 @@ ACCOUNT_LOGOUT_REDIRECT_URL = '/' SOCIALACCOUNT_EMAIL_VERIFICATION = False +# Rest framework + +REST_FRAMEWORK = { + 'DEFAULT_PARSER_CLASSES': ( + 'rest_framework.parsers.JSONParser', + ) +} + # Euth settings COMMENTABLES = (
{"golden_diff": "diff --git a/euth_wagtail/settings/base.py b/euth_wagtail/settings/base.py\n--- a/euth_wagtail/settings/base.py\n+++ b/euth_wagtail/settings/base.py\n@@ -317,6 +317,14 @@\n ACCOUNT_LOGOUT_REDIRECT_URL = '/'\n SOCIALACCOUNT_EMAIL_VERIFICATION = False\n \n+# Rest framework\n+\n+REST_FRAMEWORK = {\n+ 'DEFAULT_PARSER_CLASSES': (\n+ 'rest_framework.parsers.JSONParser',\n+ )\n+}\n+\n # Euth settings\n \n COMMENTABLES = (\n", "issue": "fix function in api.js to use contenttype json and fix all react components\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for euth_wagtail project.\n\nGenerated by 'django-admin startproject' using Django 1.9.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'home',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'modelcluster',\n 'compressor',\n 'taggit',\n 'widget_tweaks',\n 'webpack_loader',\n 'easy_thumbnails',\n 'parler',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_countries',\n 'rest_framework',\n 'autofixture',\n 'rules.apps.AutodiscoverRulesConfig',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'euth.users.apps.UsersConfig',\n 'euth.organisations.apps.OrganisationsConfig',\n 'euth.projects.apps.ProjectsConfig',\n 'euth.comments.apps.CommentConfig',\n 'euth.phases.apps.PhasesConfig',\n 'euth.modules.apps.ModuleConfig',\n 'euth.ideas.apps.IdeaConfig',\n 'euth.ratings.apps.RatingsConfig',\n 'euth.reports.apps.ReportConfig',\n 'euth.dashboard.apps.DashboardConfig',\n 'euth.memberships.apps.MembershipsConfig',\n 'euth.documents.apps.DocumentConfig',\n 'euth.flashpoll.apps.FlashpollConfig',\n 'euth.contrib',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n]\n\nSITE_ID = 1\n\nROOT_URLCONF = 'euth_wagtail.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 
'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'euth_wagtail.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Auth\n# https://docs.djangoproject.com/en/1.8/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'euth_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLANGUAGES = [\n ('en', _('English')),\n ('de', _('German')),\n ('it', _('Italien')),\n ('fr', _('French')),\n ('sv', _('Swedish')),\n ('sl', _('Slovene')),\n ('da', _('Danish')),\n]\n\nPARLER_LANGUAGES = {\n 1:[{'code': language_code } for language_code, language in LANGUAGES]\n}\n\n# fixtures\n\nFIXTURE_DIRS = [ os.path.join(PROJECT_DIR, 'fixtures') ]\n\nALLOWED_UPLOAD_IMAGES = ('image/png', 'image/jpeg', 'image/gif')\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'organisation_thumbnail': {'size': (740, 540), 'crop': 'smart'},\n 'avatar_small': {'size': (60, 60), 'crop': 'smart'},\n 'org_avatar_small': {'size': (60, 60), 'crop': 'scale'},\n 'org_avatar_medium': {'size': (200, 200), 'crop': 'scale'},\n }\n}\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': False,\n 'BUNDLE_DIR_NAME': 'bundles/', # must end with slash\n 'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),\n 'POLL_INTERVAL': 0.1,\n 'IGNORE': ['.+\\.hot-update.js', 
'.+\\.map']\n }\n}\n\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'node_modules/jquery/dist'),\n os.path.join(BASE_DIR, 'node_modules/salvattore/dist'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/javascripts'),\n os.path.join(BASE_DIR, 'node_modules/bootstrap-sass/assets/stylesheets'),\n os.path.join(BASE_DIR, 'node_modules/font-awesome'),\n os.path.join(BASE_DIR, 'node_modules/owl.carousel/dist'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/assets'),\n os.path.join(BASE_DIR, 'node_modules/flatpickr/dist'),\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\nLIBSASS_SOURCEMAPS = True\n\nEMAIL_SUBJECT_PREFIX = '[OPIN] '\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = \"euth_wagtail\"\n\n# Authentification\n\nLOGIN_URL = 'account_login'\nLOGOUT_URL = 'account_logout'\nLOGIN_REDIRECT_URL = '/'\n\nACCOUNT_ADAPTER = 'euth.users.adapters.EuthAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_SUBJECT_PREFIX = EMAIL_SUBJECT_PREFIX\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_SIGNUP_FORM_CLASS = 'euth.users.forms.SignUpForm'\nACCOUNT_USER_DISPLAY = 'euth.users.services.account_user_display'\nACCOUNT_USER_MODEL_USERNAME_FIELD = 'username'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_LOGOUT_REDIRECT_URL = '/'\nSOCIALACCOUNT_EMAIL_VERIFICATION = False\n\n# Euth settings\n\nCOMMENTABLES = (\n ('euth_ideas', 'idea'),\n ('euth_documents', 'paragraph'),\n ('euth_documents', 'document'),\n ('euth_comments', 'comment'),\n)\n\nRATEABLES = COMMENTABLES\n\nREPORTABLES = COMMENTABLES\n\nFLASHPOLL_URL = \"https://opin.flashpoll.eu/\"\n", "path": "euth_wagtail/settings/base.py"}]}
3,817
118
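Note on the record above: the golden diff is the server-side half of the issue. Declaring JSONParser as the only default parser means Django REST Framework will only accept application/json request bodies, which is the content type the reworked api.js and React components are expected to send. Below is a short sketch of the resulting contract; the REST_FRAMEWORK block is copied from the diff, while the client call uses Python's requests purely as a stand-in for the JavaScript fetch wrapper, with a placeholder URL and payload that are not taken from the repository.

# euth_wagtail/settings/base.py (the block added by the patch)
REST_FRAMEWORK = {
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
    )
}

# Stand-in for the JS client: a JSON body with the matching Content-Type is parsed,
# while a form-encoded body would typically be rejected by DRF with
# 415 Unsupported Media Type once JSONParser is the only configured parser.
import requests

response = requests.post(
    "https://example.org/api/comments/",        # placeholder URL, not an endpoint from the repo
    json={"comment": "hello", "object_pk": 1},  # requests sets Content-Type: application/json
)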
gh_patches_debug_22816
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-3393
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Rapid firing for completion The completion should have some way to spawn a completed command without closing the completion - for example, using <kbd>Shift-Enter</kbd> or middle-click. </issue> <code> [start of qutebrowser/mainwindow/statusbar/command.py] 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """The commandline in the statusbar.""" 21 22 from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize 23 from PyQt5.QtWidgets import QSizePolicy 24 25 from qutebrowser.keyinput import modeman, modeparsers 26 from qutebrowser.commands import cmdexc, cmdutils 27 from qutebrowser.misc import cmdhistory, editor 28 from qutebrowser.misc import miscwidgets as misc 29 from qutebrowser.utils import usertypes, log, objreg, message 30 from qutebrowser.config import config 31 32 33 class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit): 34 35 """The commandline part of the statusbar. 36 37 Attributes: 38 _win_id: The window ID this widget is associated with. 39 40 Signals: 41 got_cmd: Emitted when a command is triggered by the user. 42 arg: The command string and also potentially the count. 43 clear_completion_selection: Emitted before the completion widget is 44 hidden. 45 hide_completion: Emitted when the completion widget should be hidden. 46 update_completion: Emitted when the completion should be shown/updated. 47 show_cmd: Emitted when command input should be shown. 48 hide_cmd: Emitted when command input can be hidden. 49 """ 50 51 got_cmd = pyqtSignal([str], [str, int]) 52 clear_completion_selection = pyqtSignal() 53 hide_completion = pyqtSignal() 54 update_completion = pyqtSignal() 55 show_cmd = pyqtSignal() 56 hide_cmd = pyqtSignal() 57 58 def __init__(self, *, win_id, private, parent=None): 59 misc.CommandLineEdit.__init__(self, parent=parent) 60 misc.MinimalLineEditMixin.__init__(self) 61 self._win_id = win_id 62 if not private: 63 command_history = objreg.get('command-history') 64 self.history.history = command_history.data 65 self.history.changed.connect(command_history.changed) 66 self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored) 67 self.cursorPositionChanged.connect(self.update_completion) 68 self.textChanged.connect(self.update_completion) 69 self.textChanged.connect(self.updateGeometry) 70 self.textChanged.connect(self._incremental_search) 71 72 def prefix(self): 73 """Get the currently entered command prefix.""" 74 text = self.text() 75 if not text: 76 return '' 77 elif text[0] in modeparsers.STARTCHARS: 78 return text[0] 79 else: 80 return '' 81 82 def set_cmd_text(self, text): 83 """Preset the statusbar to some text. 84 85 Args: 86 text: The text to set as string. 
87 """ 88 self.setText(text) 89 log.modes.debug("Setting command text, focusing {!r}".format(self)) 90 modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus') 91 self.setFocus() 92 self.show_cmd.emit() 93 94 @cmdutils.register(instance='status-command', name='set-cmd-text', 95 scope='window', maxsplit=0) 96 @cmdutils.argument('count', count=True) 97 def set_cmd_text_command(self, text, count=None, space=False, append=False, 98 run_on_count=False): 99 """Preset the statusbar to some text. 100 101 // 102 103 Wrapper for set_cmd_text to check the arguments and allow multiple 104 strings which will get joined. 105 106 Args: 107 text: The commandline to set. 108 count: The count if given. 109 space: If given, a space is added to the end. 110 append: If given, the text is appended to the current text. 111 run_on_count: If given with a count, the command is run with the 112 given count rather than setting the command text. 113 """ 114 if space: 115 text += ' ' 116 if append: 117 if not self.text(): 118 raise cmdexc.CommandError("No current text!") 119 text = self.text() + text 120 121 if not text or text[0] not in modeparsers.STARTCHARS: 122 raise cmdexc.CommandError( 123 "Invalid command text '{}'.".format(text)) 124 if run_on_count and count is not None: 125 self.got_cmd[str, int].emit(text, count) 126 else: 127 self.set_cmd_text(text) 128 129 @cmdutils.register(instance='status-command', 130 modes=[usertypes.KeyMode.command], scope='window') 131 def command_history_prev(self): 132 """Go back in the commandline history.""" 133 try: 134 if not self.history.is_browsing(): 135 item = self.history.start(self.text().strip()) 136 else: 137 item = self.history.previtem() 138 except (cmdhistory.HistoryEmptyError, 139 cmdhistory.HistoryEndReachedError): 140 return 141 if item: 142 self.set_cmd_text(item) 143 144 @cmdutils.register(instance='status-command', 145 modes=[usertypes.KeyMode.command], scope='window') 146 def command_history_next(self): 147 """Go forward in the commandline history.""" 148 if not self.history.is_browsing(): 149 return 150 try: 151 item = self.history.nextitem() 152 except cmdhistory.HistoryEndReachedError: 153 return 154 if item: 155 self.set_cmd_text(item) 156 157 @cmdutils.register(instance='status-command', 158 modes=[usertypes.KeyMode.command], scope='window') 159 def command_accept(self): 160 """Execute the command currently in the commandline.""" 161 prefixes = { 162 ':': '', 163 '/': 'search -- ', 164 '?': 'search -r -- ', 165 } 166 text = self.text() 167 self.history.append(text) 168 modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept') 169 self.got_cmd[str].emit(prefixes[text[0]] + text[1:]) 170 171 @cmdutils.register(instance='status-command', scope='window') 172 def edit_command(self, run=False): 173 """Open an editor to modify the current command. 174 175 Args: 176 run: Run the command if the editor exits successfully. 177 """ 178 ed = editor.ExternalEditor(parent=self) 179 180 def callback(text): 181 """Set the commandline to the edited text.""" 182 if not text or text[0] not in modeparsers.STARTCHARS: 183 message.error('command must start with one of {}' 184 .format(modeparsers.STARTCHARS)) 185 return 186 self.set_cmd_text(text) 187 if run: 188 self.command_accept() 189 190 ed.editing_finished.connect(callback) 191 ed.edit(self.text()) 192 193 @pyqtSlot(usertypes.KeyMode) 194 def on_mode_left(self, mode): 195 """Clear up when command mode was left. 196 197 - Clear the statusbar text if it's explicitly unfocused. 
198 - Clear completion selection 199 - Hide completion 200 201 Args: 202 mode: The mode which was left. 203 """ 204 if mode == usertypes.KeyMode.command: 205 self.setText('') 206 self.history.stop() 207 self.hide_cmd.emit() 208 self.clear_completion_selection.emit() 209 self.hide_completion.emit() 210 211 def setText(self, text): 212 """Extend setText to set prefix and make sure the prompt is ok.""" 213 if not text: 214 pass 215 elif text[0] in modeparsers.STARTCHARS: 216 super().set_prompt(text[0]) 217 else: 218 raise AssertionError("setText got called with invalid text " 219 "'{}'!".format(text)) 220 super().setText(text) 221 222 def keyPressEvent(self, e): 223 """Override keyPressEvent to ignore Return key presses. 224 225 If this widget is focused, we are in passthrough key mode, and 226 Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished 227 without command_accept to be called. 228 """ 229 if e.key() == Qt.Key_Return: 230 e.ignore() 231 return 232 else: 233 super().keyPressEvent(e) 234 235 def sizeHint(self): 236 """Dynamically calculate the needed size.""" 237 height = super().sizeHint().height() 238 text = self.text() 239 if not text: 240 text = 'x' 241 width = self.fontMetrics().width(text) 242 return QSize(width, height) 243 244 @pyqtSlot(str) 245 def _incremental_search(self, text): 246 if not config.val.search.incremental: 247 return 248 249 search_prefixes = { 250 '/': 'search -- ', 251 '?': 'search -r -- ', 252 } 253 254 if self.prefix() in ['/', '?']: 255 self.got_cmd[str].emit(search_prefixes[text[0]] + text[1:]) 256 [end of qutebrowser/mainwindow/statusbar/command.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py --- a/qutebrowser/mainwindow/statusbar/command.py +++ b/qutebrowser/mainwindow/statusbar/command.py @@ -156,8 +156,12 @@ @cmdutils.register(instance='status-command', modes=[usertypes.KeyMode.command], scope='window') - def command_accept(self): - """Execute the command currently in the commandline.""" + def command_accept(self, rapid=False): + """Execute the command currently in the commandline. + + Args: + rapid: Run the command without closing or clearing the command bar. + """ prefixes = { ':': '', '/': 'search -- ', @@ -165,7 +169,9 @@ } text = self.text() self.history.append(text) - modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept') + if not rapid: + modeman.leave(self._win_id, usertypes.KeyMode.command, + 'cmd accept') self.got_cmd[str].emit(prefixes[text[0]] + text[1:]) @cmdutils.register(instance='status-command', scope='window')
{"golden_diff": "diff --git a/qutebrowser/mainwindow/statusbar/command.py b/qutebrowser/mainwindow/statusbar/command.py\n--- a/qutebrowser/mainwindow/statusbar/command.py\n+++ b/qutebrowser/mainwindow/statusbar/command.py\n@@ -156,8 +156,12 @@\n \n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n- def command_accept(self):\n- \"\"\"Execute the command currently in the commandline.\"\"\"\n+ def command_accept(self, rapid=False):\n+ \"\"\"Execute the command currently in the commandline.\n+\n+ Args:\n+ rapid: Run the command without closing or clearing the command bar.\n+ \"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n@@ -165,7 +169,9 @@\n }\n text = self.text()\n self.history.append(text)\n- modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n+ if not rapid:\n+ modeman.leave(self._win_id, usertypes.KeyMode.command,\n+ 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n \n @cmdutils.register(instance='status-command', scope='window')\n", "issue": "Rapid firing for completion\nThe completion should have some way to spawn a completed command without closing the completion - for example, using <kbd>Shift-Enter</kbd> or middle-click.\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The commandline in the statusbar.\"\"\"\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize\nfrom PyQt5.QtWidgets import QSizePolicy\n\nfrom qutebrowser.keyinput import modeman, modeparsers\nfrom qutebrowser.commands import cmdexc, cmdutils\nfrom qutebrowser.misc import cmdhistory, editor\nfrom qutebrowser.misc import miscwidgets as misc\nfrom qutebrowser.utils import usertypes, log, objreg, message\nfrom qutebrowser.config import config\n\n\nclass Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):\n\n \"\"\"The commandline part of the statusbar.\n\n Attributes:\n _win_id: The window ID this widget is associated with.\n\n Signals:\n got_cmd: Emitted when a command is triggered by the user.\n arg: The command string and also potentially the count.\n clear_completion_selection: Emitted before the completion widget is\n hidden.\n hide_completion: Emitted when the completion widget should be hidden.\n update_completion: Emitted when the completion should be shown/updated.\n show_cmd: Emitted when command input should be shown.\n hide_cmd: Emitted when command input can be hidden.\n \"\"\"\n\n got_cmd = pyqtSignal([str], [str, int])\n clear_completion_selection = pyqtSignal()\n hide_completion = pyqtSignal()\n update_completion = pyqtSignal()\n show_cmd = pyqtSignal()\n hide_cmd = pyqtSignal()\n\n def __init__(self, *, win_id, private, parent=None):\n misc.CommandLineEdit.__init__(self, parent=parent)\n misc.MinimalLineEditMixin.__init__(self)\n self._win_id = win_id\n if not private:\n command_history = objreg.get('command-history')\n self.history.history = command_history.data\n self.history.changed.connect(command_history.changed)\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)\n self.cursorPositionChanged.connect(self.update_completion)\n self.textChanged.connect(self.update_completion)\n self.textChanged.connect(self.updateGeometry)\n self.textChanged.connect(self._incremental_search)\n\n def prefix(self):\n \"\"\"Get the currently entered command prefix.\"\"\"\n text = self.text()\n if not text:\n return ''\n elif text[0] in modeparsers.STARTCHARS:\n return text[0]\n else:\n return ''\n\n def set_cmd_text(self, text):\n \"\"\"Preset the statusbar to some text.\n\n Args:\n text: The text to set as string.\n \"\"\"\n self.setText(text)\n log.modes.debug(\"Setting command text, focusing {!r}\".format(self))\n modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')\n self.setFocus()\n self.show_cmd.emit()\n\n @cmdutils.register(instance='status-command', name='set-cmd-text',\n scope='window', maxsplit=0)\n @cmdutils.argument('count', count=True)\n def set_cmd_text_command(self, text, count=None, space=False, append=False,\n run_on_count=False):\n \"\"\"Preset the statusbar to some text.\n\n //\n\n Wrapper for set_cmd_text to check the arguments and allow multiple\n strings which will get joined.\n\n Args:\n text: The commandline to set.\n count: The count if given.\n space: If given, a space is added to the end.\n append: If given, the text is appended to the current text.\n run_on_count: If given with a count, the command is run with the\n given count rather than setting the command text.\n \"\"\"\n if space:\n text += ' '\n if append:\n if not self.text():\n raise cmdexc.CommandError(\"No current text!\")\n text = self.text() + text\n\n if not text or text[0] not in modeparsers.STARTCHARS:\n raise cmdexc.CommandError(\n \"Invalid command text '{}'.\".format(text))\n if run_on_count and 
count is not None:\n self.got_cmd[str, int].emit(text, count)\n else:\n self.set_cmd_text(text)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_prev(self):\n \"\"\"Go back in the commandline history.\"\"\"\n try:\n if not self.history.is_browsing():\n item = self.history.start(self.text().strip())\n else:\n item = self.history.previtem()\n except (cmdhistory.HistoryEmptyError,\n cmdhistory.HistoryEndReachedError):\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_history_next(self):\n \"\"\"Go forward in the commandline history.\"\"\"\n if not self.history.is_browsing():\n return\n try:\n item = self.history.nextitem()\n except cmdhistory.HistoryEndReachedError:\n return\n if item:\n self.set_cmd_text(item)\n\n @cmdutils.register(instance='status-command',\n modes=[usertypes.KeyMode.command], scope='window')\n def command_accept(self):\n \"\"\"Execute the command currently in the commandline.\"\"\"\n prefixes = {\n ':': '',\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n text = self.text()\n self.history.append(text)\n modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept')\n self.got_cmd[str].emit(prefixes[text[0]] + text[1:])\n\n @cmdutils.register(instance='status-command', scope='window')\n def edit_command(self, run=False):\n \"\"\"Open an editor to modify the current command.\n\n Args:\n run: Run the command if the editor exits successfully.\n \"\"\"\n ed = editor.ExternalEditor(parent=self)\n\n def callback(text):\n \"\"\"Set the commandline to the edited text.\"\"\"\n if not text or text[0] not in modeparsers.STARTCHARS:\n message.error('command must start with one of {}'\n .format(modeparsers.STARTCHARS))\n return\n self.set_cmd_text(text)\n if run:\n self.command_accept()\n\n ed.editing_finished.connect(callback)\n ed.edit(self.text())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Clear up when command mode was left.\n\n - Clear the statusbar text if it's explicitly unfocused.\n - Clear completion selection\n - Hide completion\n\n Args:\n mode: The mode which was left.\n \"\"\"\n if mode == usertypes.KeyMode.command:\n self.setText('')\n self.history.stop()\n self.hide_cmd.emit()\n self.clear_completion_selection.emit()\n self.hide_completion.emit()\n\n def setText(self, text):\n \"\"\"Extend setText to set prefix and make sure the prompt is ok.\"\"\"\n if not text:\n pass\n elif text[0] in modeparsers.STARTCHARS:\n super().set_prompt(text[0])\n else:\n raise AssertionError(\"setText got called with invalid text \"\n \"'{}'!\".format(text))\n super().setText(text)\n\n def keyPressEvent(self, e):\n \"\"\"Override keyPressEvent to ignore Return key presses.\n\n If this widget is focused, we are in passthrough key mode, and\n Enter/Shift+Enter/etc. 
will cause QLineEdit to think it's finished\n without command_accept to be called.\n \"\"\"\n if e.key() == Qt.Key_Return:\n e.ignore()\n return\n else:\n super().keyPressEvent(e)\n\n def sizeHint(self):\n \"\"\"Dynamically calculate the needed size.\"\"\"\n height = super().sizeHint().height()\n text = self.text()\n if not text:\n text = 'x'\n width = self.fontMetrics().width(text)\n return QSize(width, height)\n\n @pyqtSlot(str)\n def _incremental_search(self, text):\n if not config.val.search.incremental:\n return\n\n search_prefixes = {\n '/': 'search -- ',\n '?': 'search -r -- ',\n }\n\n if self.prefix() in ['/', '?']:\n self.got_cmd[str].emit(search_prefixes[text[0]] + text[1:])\n", "path": "qutebrowser/mainwindow/statusbar/command.py"}]}
3,230
275
gh_patches_debug_21542
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-202
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Too many false positives when detecting wake word </issue> <code> [start of mycroft/client/speech/local_recognizer.py] 1 # Copyright 2016 Mycroft AI, Inc. 2 # 3 # This file is part of Mycroft Core. 4 # 5 # Mycroft Core is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Mycroft Core is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>. 17 18 19 import time 20 21 import os 22 from pocketsphinx.pocketsphinx import Decoder 23 24 __author__ = 'seanfitz, jdorleans' 25 26 BASEDIR = os.path.dirname(os.path.abspath(__file__)) 27 28 29 class LocalRecognizer(object): 30 def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"): 31 self.lang = lang 32 self.key_phrase = key_phrase 33 self.sample_rate = sample_rate 34 self.configure() 35 36 def configure(self): 37 config = Decoder.default_config() 38 config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang, 39 'hmm')) 40 config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang, 41 'mycroft-en-us.dict')) 42 config.set_string('-keyphrase', self.key_phrase) 43 config.set_float('-kws_threshold', float('1e-45')) 44 config.set_float('-samprate', self.sample_rate) 45 config.set_int('-nfft', 2048) 46 config.set_string('-logfn', '/dev/null') 47 self.decoder = Decoder(config) 48 49 def transcribe(self, byte_data, metrics=None): 50 start = time.time() 51 self.decoder.start_utt() 52 self.decoder.process_raw(byte_data, False, False) 53 self.decoder.end_utt() 54 if metrics: 55 metrics.timer("mycroft.stt.local.time_s", time.time() - start) 56 return self.decoder.hyp() 57 58 def is_recognized(self, byte_data, metrics): 59 hyp = self.transcribe(byte_data, metrics) 60 return hyp and self.key_phrase in hyp.hypstr.lower() 61 62 def found_wake_word(self, hypothesis): 63 return hypothesis and self.key_phrase in hypothesis.hypstr.lower() 64 [end of mycroft/client/speech/local_recognizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py --- a/mycroft/client/speech/local_recognizer.py +++ b/mycroft/client/speech/local_recognizer.py @@ -27,7 +27,8 @@ class LocalRecognizer(object): - def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"): + def __init__(self, sample_rate=16000, lang="en-us", + key_phrase="hey mycroft"): self.lang = lang self.key_phrase = key_phrase self.sample_rate = sample_rate @@ -40,7 +41,7 @@ config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang, 'mycroft-en-us.dict')) config.set_string('-keyphrase', self.key_phrase) - config.set_float('-kws_threshold', float('1e-45')) + config.set_float('-kws_threshold', float('1e-90')) config.set_float('-samprate', self.sample_rate) config.set_int('-nfft', 2048) config.set_string('-logfn', '/dev/null')
{"golden_diff": "diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py\n--- a/mycroft/client/speech/local_recognizer.py\n+++ b/mycroft/client/speech/local_recognizer.py\n@@ -27,7 +27,8 @@\n \n \n class LocalRecognizer(object):\n- def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n+ def __init__(self, sample_rate=16000, lang=\"en-us\",\n+ key_phrase=\"hey mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n@@ -40,7 +41,7 @@\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n- config.set_float('-kws_threshold', float('1e-45'))\n+ config.set_float('-kws_threshold', float('1e-90'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n", "issue": "Too many false positives when detecting wake word\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\n\nimport os\nfrom pocketsphinx.pocketsphinx import Decoder\n\n__author__ = 'seanfitz, jdorleans'\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass LocalRecognizer(object):\n def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n self.configure()\n\n def configure(self):\n config = Decoder.default_config()\n config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,\n 'hmm'))\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n config.set_float('-kws_threshold', float('1e-45'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n self.decoder = Decoder(config)\n\n def transcribe(self, byte_data, metrics=None):\n start = time.time()\n self.decoder.start_utt()\n self.decoder.process_raw(byte_data, False, False)\n self.decoder.end_utt()\n if metrics:\n metrics.timer(\"mycroft.stt.local.time_s\", time.time() - start)\n return self.decoder.hyp()\n\n def is_recognized(self, byte_data, metrics):\n hyp = self.transcribe(byte_data, metrics)\n return hyp and self.key_phrase in hyp.hypstr.lower()\n\n def found_wake_word(self, hypothesis):\n return hypothesis and self.key_phrase in hypothesis.hypstr.lower()\n", "path": "mycroft/client/speech/local_recognizer.py"}]}
1,225
277
gh_patches_debug_26642
rasdani/github-patches
git_diff
cloudtools__troposphere-174
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AutoScalingGroup LaunchConfigurationName requirement AWS::AutoScaling::AutoScalingGroup<sup>1</sup> specifies that the LaunchConfigurationName and InstanceId fields are conditionally required (one must be present, and they are mutually exclusive) but the Troposphere class 'AutoScalingGroup' requires the LaunchConfigurationName parameter unconditionally. The result of this is that an autoscaling group requiring an InstanceId can not be provisioned through the 'AutoScalingGroup' class. <sup>1</sup> (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-instanceid) </issue> <code> [start of troposphere/autoscaling.py] 1 # Copyright (c) 2012-2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSHelperFn, AWSObject, AWSProperty, Ref 7 from .validators import boolean, integer 8 from . import cloudformation 9 10 11 EC2_INSTANCE_LAUNCH = "autoscaling:EC2_INSTANCE_LAUNCH" 12 EC2_INSTANCE_LAUNCH_ERROR = "autoscaling:EC2_INSTANCE_LAUNCH_ERROR" 13 EC2_INSTANCE_TERMINATE = "autoscaling:EC2_INSTANCE_TERMINATE" 14 EC2_INSTANCE_TERMINATE_ERROR = "autoscaling:EC2_INSTANCE_TERMINATE_ERROR" 15 TEST_NOTIFICATION = "autoscaling:TEST_NOTIFICATION" 16 17 # Termination Policy constants 18 Default = 'Default' 19 OldestInstance = 'OldestInstance' 20 NewestInstance = 'NewestInstance' 21 OldestLaunchConfiguration = 'OldestLaunchConfiguration' 22 ClosestToNextInstanceHour = 'ClosestToNextInstanceHour' 23 24 25 class Tag(AWSHelperFn): 26 def __init__(self, key, value, propogate): 27 self.data = { 28 'Key': key, 29 'Value': value, 30 'PropagateAtLaunch': propogate, 31 } 32 33 def JSONrepr(self): 34 return self.data 35 36 37 class Tags(AWSHelperFn): 38 defaultPropagateAtLaunch = True 39 manyType = [type([]), type(())] 40 41 def __init__(self, **kwargs): 42 self.tags = [] 43 for k, v in sorted(kwargs.iteritems()): 44 if type(v) in self.manyType: 45 propagate = str(v[1]).lower() 46 v = v[0] 47 else: 48 propagate = str(self.defaultPropagateAtLaunch).lower() 49 self.tags.append({ 50 'Key': k, 51 'Value': v, 52 'PropagateAtLaunch': propagate, 53 }) 54 55 def JSONrepr(self): 56 return self.tags 57 58 59 class NotificationConfiguration(AWSProperty): 60 props = { 61 'TopicARN': (basestring, True), 62 'NotificationTypes': (list, True), 63 } 64 65 66 class MetricsCollection(AWSProperty): 67 props = { 68 'Granularity': (basestring, True), 69 'Metrics': (list, False), 70 } 71 72 73 class Metadata(AWSHelperFn): 74 def __init__(self, init, authentication=None): 75 self.validate(init, authentication) 76 # get keys and values from init and authentication 77 # safe to use cause its always one key 78 initKey, initValue = init.data.popitem() 79 self.data = {initKey: initValue} 80 if authentication: 81 authKey, authValue = authentication.data.popitem() 82 self.data[authKey] = authValue 83 84 def validate(self, init, authentication): 85 if not isinstance(init, cloudformation.Init): 86 raise ValueError( 87 'init must be of type cloudformation.Init' 88 ) 89 90 is_instance = isinstance(authentication, cloudformation.Authentication) 91 if authentication and not is_instance: 92 raise ValueError( 93 'authentication must be of type cloudformation.Authentication' 94 ) 95 96 def JSONrepr(self): 97 return self.data 98 99 100 class AutoScalingGroup(AWSObject): 101 resource_type = "AWS::AutoScaling::AutoScalingGroup" 102 103 
props = { 104 'AvailabilityZones': (list, True), 105 'Cooldown': (integer, False), 106 'DesiredCapacity': (integer, False), 107 'HealthCheckGracePeriod': (int, False), 108 'HealthCheckType': (basestring, False), 109 'InstanceId': (basestring, False), 110 'LaunchConfigurationName': (basestring, True), 111 'LoadBalancerNames': (list, False), 112 'MaxSize': (integer, True), 113 'MetricsCollection': ([MetricsCollection], False), 114 'MinSize': (integer, True), 115 'NotificationConfiguration': (NotificationConfiguration, False), 116 'PlacementGroup': (basestring, False), 117 'Tags': (list, False), # Although docs say these are required 118 'TerminationPolicies': ([basestring], False), 119 'VPCZoneIdentifier': (list, False), 120 } 121 122 def validate(self): 123 if 'UpdatePolicy' in self.resource: 124 update_policy = self.resource['UpdatePolicy'] 125 126 isMinRef = isinstance(update_policy.MinInstancesInService, Ref) 127 isMaxRef = isinstance(self.MaxSize, Ref) 128 129 if not (isMinRef or isMaxRef): 130 minCount = int(update_policy.MinInstancesInService) 131 maxCount = int(self.MaxSize) 132 133 if minCount >= maxCount: 134 raise ValueError( 135 "The UpdatePolicy attribute " 136 "MinInstancesInService must be less than the " 137 "autoscaling group's MaxSize") 138 return True 139 140 141 class LaunchConfiguration(AWSObject): 142 resource_type = "AWS::AutoScaling::LaunchConfiguration" 143 144 props = { 145 'AssociatePublicIpAddress': (boolean, False), 146 'BlockDeviceMappings': (list, False), 147 'EbsOptimized': (boolean, False), 148 'IamInstanceProfile': (basestring, False), 149 'ImageId': (basestring, True), 150 'InstanceId': (basestring, False), 151 'InstanceMonitoring': (boolean, False), 152 'InstanceType': (basestring, True), 153 'KernelId': (basestring, False), 154 'KeyName': (basestring, False), 155 'Metadata': (Metadata, False), 156 'RamDiskId': (basestring, False), 157 'SecurityGroups': (list, False), 158 'SpotPrice': (basestring, False), 159 'UserData': (basestring, False), 160 } 161 162 163 class ScalingPolicy(AWSObject): 164 resource_type = "AWS::AutoScaling::ScalingPolicy" 165 166 props = { 167 'AdjustmentType': (basestring, True), 168 'AutoScalingGroupName': (basestring, True), 169 'Cooldown': (integer, False), 170 'ScalingAdjustment': (basestring, True), 171 } 172 173 174 class ScheduledAction(AWSObject): 175 resource_type = "AWS::AutoScaling::ScheduledAction" 176 177 props = { 178 'AutoScalingGroupName': (basestring, True), 179 'DesiredCapacity': (integer, False), 180 'EndTime': (basestring, True), 181 'MaxSize': (integer, False), 182 'MinSize': (integer, False), 183 'Recurrence': (basestring, True), 184 'StartTime': (basestring, True), 185 } 186 187 188 class Trigger(AWSObject): 189 resource_type = "AWS::AutoScaling::Trigger" 190 191 props = { 192 'AutoScalingGroupName': (basestring, True), 193 'BreachDuration': (integer, True), 194 'Dimensions': (list, True), 195 'LowerBreachScaleIncrement': (integer, False), 196 'LowerThreshold': (integer, True), 197 'MetricName': (basestring, True), 198 'Namespace': (basestring, True), 199 'Period': (integer, True), 200 'Statistic': (basestring, True), 201 'Unit': (basestring, False), 202 'UpperBreachScaleIncrement': (integer, False), 203 'UpperThreshold': (integer, True), 204 } 205 206 207 class EBSBlockDevice(AWSProperty): 208 # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html 209 props = { 210 'DeleteOnTermination': (boolean, False), 211 'Iops': (integer, False), 212 
'SnapshotId': (basestring, False), 213 'VolumeSize': (integer, False), 214 'VolumeType': (basestring, False), 215 } 216 217 218 class BlockDeviceMapping(AWSProperty): 219 # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html 220 props = { 221 'DeviceName': (basestring, True), 222 'Ebs': (EBSBlockDevice, False), 223 'NoDevice': (boolean, False), 224 'VirtualName': (basestring, False), 225 } 226 [end of troposphere/autoscaling.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/autoscaling.py b/troposphere/autoscaling.py --- a/troposphere/autoscaling.py +++ b/troposphere/autoscaling.py @@ -107,7 +107,7 @@ 'HealthCheckGracePeriod': (int, False), 'HealthCheckType': (basestring, False), 'InstanceId': (basestring, False), - 'LaunchConfigurationName': (basestring, True), + 'LaunchConfigurationName': (basestring, False), 'LoadBalancerNames': (list, False), 'MaxSize': (integer, True), 'MetricsCollection': ([MetricsCollection], False), @@ -135,6 +135,16 @@ "The UpdatePolicy attribute " "MinInstancesInService must be less than the " "autoscaling group's MaxSize") + launch_config = self.properties.get('LaunchConfigurationName') + instance_id = self.properties.get('InstanceId') + if launch_config and instance_id: + raise ValueError("LaunchConfigurationName and InstanceId " + "are mutually exclusive.") + if not launch_config and not instance_id: + raise ValueError("Must specify either LaunchConfigurationName or " + "InstanceId: http://docs.aws.amazon.com/AWSCloud" + "Formation/latest/UserGuide/aws-properties-as-gr" + "oup.html#cfn-as-group-instanceid") return True
{"golden_diff": "diff --git a/troposphere/autoscaling.py b/troposphere/autoscaling.py\n--- a/troposphere/autoscaling.py\n+++ b/troposphere/autoscaling.py\n@@ -107,7 +107,7 @@\n 'HealthCheckGracePeriod': (int, False),\n 'HealthCheckType': (basestring, False),\n 'InstanceId': (basestring, False),\n- 'LaunchConfigurationName': (basestring, True),\n+ 'LaunchConfigurationName': (basestring, False),\n 'LoadBalancerNames': (list, False),\n 'MaxSize': (integer, True),\n 'MetricsCollection': ([MetricsCollection], False),\n@@ -135,6 +135,16 @@\n \"The UpdatePolicy attribute \"\n \"MinInstancesInService must be less than the \"\n \"autoscaling group's MaxSize\")\n+ launch_config = self.properties.get('LaunchConfigurationName')\n+ instance_id = self.properties.get('InstanceId')\n+ if launch_config and instance_id:\n+ raise ValueError(\"LaunchConfigurationName and InstanceId \"\n+ \"are mutually exclusive.\")\n+ if not launch_config and not instance_id:\n+ raise ValueError(\"Must specify either LaunchConfigurationName or \"\n+ \"InstanceId: http://docs.aws.amazon.com/AWSCloud\"\n+ \"Formation/latest/UserGuide/aws-properties-as-gr\"\n+ \"oup.html#cfn-as-group-instanceid\")\n return True\n", "issue": "AutoScalingGroup LaunchConfigurationName requirement\nAWS::AutoScaling::AutoScalingGroup<sup>1</sup> specifies that the LaunchConfigurationName and InstanceId fields are conditionally required (one must be present, and they are mutually exclusive) but the Troposphere class 'AutoScalingGroup' requires the LaunchConfigurationName parameter unconditionally. The result of this is that an autoscaling group requiring an InstanceId can not be provisioned through the 'AutoScalingGroup' class.\n\n<sup>1</sup> (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html#cfn-as-group-instanceid)\n\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import boolean, integer\nfrom . 
import cloudformation\n\n\nEC2_INSTANCE_LAUNCH = \"autoscaling:EC2_INSTANCE_LAUNCH\"\nEC2_INSTANCE_LAUNCH_ERROR = \"autoscaling:EC2_INSTANCE_LAUNCH_ERROR\"\nEC2_INSTANCE_TERMINATE = \"autoscaling:EC2_INSTANCE_TERMINATE\"\nEC2_INSTANCE_TERMINATE_ERROR = \"autoscaling:EC2_INSTANCE_TERMINATE_ERROR\"\nTEST_NOTIFICATION = \"autoscaling:TEST_NOTIFICATION\"\n\n# Termination Policy constants\nDefault = 'Default'\nOldestInstance = 'OldestInstance'\nNewestInstance = 'NewestInstance'\nOldestLaunchConfiguration = 'OldestLaunchConfiguration'\nClosestToNextInstanceHour = 'ClosestToNextInstanceHour'\n\n\nclass Tag(AWSHelperFn):\n def __init__(self, key, value, propogate):\n self.data = {\n 'Key': key,\n 'Value': value,\n 'PropagateAtLaunch': propogate,\n }\n\n def JSONrepr(self):\n return self.data\n\n\nclass Tags(AWSHelperFn):\n defaultPropagateAtLaunch = True\n manyType = [type([]), type(())]\n\n def __init__(self, **kwargs):\n self.tags = []\n for k, v in sorted(kwargs.iteritems()):\n if type(v) in self.manyType:\n propagate = str(v[1]).lower()\n v = v[0]\n else:\n propagate = str(self.defaultPropagateAtLaunch).lower()\n self.tags.append({\n 'Key': k,\n 'Value': v,\n 'PropagateAtLaunch': propagate,\n })\n\n def JSONrepr(self):\n return self.tags\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'TopicARN': (basestring, True),\n 'NotificationTypes': (list, True),\n }\n\n\nclass MetricsCollection(AWSProperty):\n props = {\n 'Granularity': (basestring, True),\n 'Metrics': (list, False),\n }\n\n\nclass Metadata(AWSHelperFn):\n def __init__(self, init, authentication=None):\n self.validate(init, authentication)\n # get keys and values from init and authentication\n # safe to use cause its always one key\n initKey, initValue = init.data.popitem()\n self.data = {initKey: initValue}\n if authentication:\n authKey, authValue = authentication.data.popitem()\n self.data[authKey] = authValue\n\n def validate(self, init, authentication):\n if not isinstance(init, cloudformation.Init):\n raise ValueError(\n 'init must be of type cloudformation.Init'\n )\n\n is_instance = isinstance(authentication, cloudformation.Authentication)\n if authentication and not is_instance:\n raise ValueError(\n 'authentication must be of type cloudformation.Authentication'\n )\n\n def JSONrepr(self):\n return self.data\n\n\nclass AutoScalingGroup(AWSObject):\n resource_type = \"AWS::AutoScaling::AutoScalingGroup\"\n\n props = {\n 'AvailabilityZones': (list, True),\n 'Cooldown': (integer, False),\n 'DesiredCapacity': (integer, False),\n 'HealthCheckGracePeriod': (int, False),\n 'HealthCheckType': (basestring, False),\n 'InstanceId': (basestring, False),\n 'LaunchConfigurationName': (basestring, True),\n 'LoadBalancerNames': (list, False),\n 'MaxSize': (integer, True),\n 'MetricsCollection': ([MetricsCollection], False),\n 'MinSize': (integer, True),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'PlacementGroup': (basestring, False),\n 'Tags': (list, False), # Although docs say these are required\n 'TerminationPolicies': ([basestring], False),\n 'VPCZoneIdentifier': (list, False),\n }\n\n def validate(self):\n if 'UpdatePolicy' in self.resource:\n update_policy = self.resource['UpdatePolicy']\n\n isMinRef = isinstance(update_policy.MinInstancesInService, Ref)\n isMaxRef = isinstance(self.MaxSize, Ref)\n\n if not (isMinRef or isMaxRef):\n minCount = int(update_policy.MinInstancesInService)\n maxCount = int(self.MaxSize)\n\n if minCount >= maxCount:\n raise ValueError(\n \"The UpdatePolicy attribute 
\"\n \"MinInstancesInService must be less than the \"\n \"autoscaling group's MaxSize\")\n return True\n\n\nclass LaunchConfiguration(AWSObject):\n resource_type = \"AWS::AutoScaling::LaunchConfiguration\"\n\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'BlockDeviceMappings': (list, False),\n 'EbsOptimized': (boolean, False),\n 'IamInstanceProfile': (basestring, False),\n 'ImageId': (basestring, True),\n 'InstanceId': (basestring, False),\n 'InstanceMonitoring': (boolean, False),\n 'InstanceType': (basestring, True),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Metadata': (Metadata, False),\n 'RamDiskId': (basestring, False),\n 'SecurityGroups': (list, False),\n 'SpotPrice': (basestring, False),\n 'UserData': (basestring, False),\n }\n\n\nclass ScalingPolicy(AWSObject):\n resource_type = \"AWS::AutoScaling::ScalingPolicy\"\n\n props = {\n 'AdjustmentType': (basestring, True),\n 'AutoScalingGroupName': (basestring, True),\n 'Cooldown': (integer, False),\n 'ScalingAdjustment': (basestring, True),\n }\n\n\nclass ScheduledAction(AWSObject):\n resource_type = \"AWS::AutoScaling::ScheduledAction\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'DesiredCapacity': (integer, False),\n 'EndTime': (basestring, True),\n 'MaxSize': (integer, False),\n 'MinSize': (integer, False),\n 'Recurrence': (basestring, True),\n 'StartTime': (basestring, True),\n }\n\n\nclass Trigger(AWSObject):\n resource_type = \"AWS::AutoScaling::Trigger\"\n\n props = {\n 'AutoScalingGroupName': (basestring, True),\n 'BreachDuration': (integer, True),\n 'Dimensions': (list, True),\n 'LowerBreachScaleIncrement': (integer, False),\n 'LowerThreshold': (integer, True),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'Period': (integer, True),\n 'Statistic': (basestring, True),\n 'Unit': (basestring, False),\n 'UpperBreachScaleIncrement': (integer, False),\n 'UpperThreshold': (integer, True),\n }\n\n\nclass EBSBlockDevice(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-template.html\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Iops': (integer, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (basestring, False),\n }\n\n\nclass BlockDeviceMapping(AWSProperty):\n # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html\n props = {\n 'DeviceName': (basestring, True),\n 'Ebs': (EBSBlockDevice, False),\n 'NoDevice': (boolean, False),\n 'VirtualName': (basestring, False),\n }\n", "path": "troposphere/autoscaling.py"}]}
2,959
308
gh_patches_debug_39862
rasdani/github-patches
git_diff
encode__uvicorn-33
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Transfer-Encoding: Chunked Responses need to support `Transfer-Encoding: Chunked` </issue> <code> [start of uvicorn/protocols/http.py] 1 import asyncio 2 import collections 3 import email 4 import http 5 import httptools 6 import os 7 import time 8 9 from uvicorn.protocols.websocket import websocket_upgrade 10 11 12 def set_time_and_date(): 13 global CURRENT_TIME 14 global DATE_HEADER 15 16 CURRENT_TIME = time.time() 17 DATE_HEADER = b''.join([ 18 b'date: ', 19 email.utils.formatdate(CURRENT_TIME, usegmt=True).encode(), 20 b'\r\n' 21 ]) 22 23 24 def get_status_line(status_code): 25 try: 26 phrase = http.HTTPStatus(status_code).phrase.encode() 27 except ValueError: 28 phrase = b'' 29 return b''.join([ 30 b'HTTP/1.1 ', str(status_code).encode(), b' ', phrase, b'\r\n' 31 ]) 32 33 34 CURRENT_TIME = 0.0 35 DATE_HEADER = b'' 36 SERVER_HEADER = b'server: uvicorn\r\n' 37 STATUS_LINE = { 38 status_code: get_status_line(status_code) for status_code in range(100, 600) 39 } 40 41 LOW_WATER_LIMIT = 16384 42 HIGH_WATER_LIMIT = 65536 43 MAX_PIPELINED_REQUESTS = 20 44 45 set_time_and_date() 46 47 48 class BodyChannel(object): 49 __slots__ = ['_queue', '_protocol', 'name'] 50 51 def __init__(self, protocol): 52 self._queue = asyncio.Queue() 53 self._protocol = protocol 54 self.name = 'body:%d' % id(self) 55 56 def _put(self, message): 57 self._queue.put_nowait(message) 58 self._protocol.buffer_size += len(message['content']) 59 self._protocol.check_pause_reading() 60 61 async def receive(self): 62 message = await self._queue.get() 63 self._protocol.buffer_size -= len(message['content']) 64 self._protocol.check_resume_reading() 65 66 67 class ReplyChannel(object): 68 __slots__ = ['_protocol', 'name'] 69 70 def __init__(self, protocol): 71 self._protocol = protocol 72 self.name = 'reply:%d' % id(self) 73 74 async def send(self, message): 75 protocol = self._protocol 76 transport = protocol.transport 77 78 if transport is None: 79 return 80 81 if protocol.write_paused: 82 await transport.drain() 83 84 status = message.get('status') 85 headers = message.get('headers') 86 content = message.get('content') 87 more_content = message.get('more_content', False) 88 89 if status is not None: 90 response = [ 91 STATUS_LINE[status], 92 SERVER_HEADER, 93 DATE_HEADER, 94 ] 95 transport.write(b''.join(response)) 96 97 if headers is not None: 98 response = [] 99 if content is not None and not more_content: 100 response = [b'content-length: ', str(len(content)).encode(), b'\r\n'] 101 102 for header_name, header_value in headers: 103 response.extend([header_name, b': ', header_value, b'\r\n']) 104 response.append(b'\r\n') 105 106 transport.write(b''.join(response)) 107 108 if content is not None: 109 transport.write(content) 110 111 if not more_content: 112 if (not status) or (not self._protocol.request_parser.should_keep_alive()): 113 transport.close() 114 elif protocol.pipeline_queue: 115 message, channels = protocol.pipeline_queue.popleft() 116 protocol.loop.create_task(protocol.consumer(message, channels)) 117 protocol.check_resume_reading() 118 else: 119 protocol.has_active_request = False 120 121 122 class HttpProtocol(asyncio.Protocol): 123 __slots__ = [ 124 'consumer', 'loop', 'request_parser', 125 'base_message', 'base_channels', 126 'transport', 'message', 'channels', 'headers', 'upgrade', 127 'read_paused', 'write_paused', 128 'buffer_size', 'high_water_limit', 'low_water_limit', 129 'has_active_request', 
'max_pipelined_requests', 'pipeline_queue' 130 ] 131 132 def __init__(self, consumer, loop, sock, cfg): 133 self.consumer = consumer 134 self.loop = loop 135 self.request_parser = httptools.HttpRequestParser(self) 136 137 self.base_message = { 138 'channel': 'http.request', 139 'scheme': 'https' if cfg.is_ssl else 'http', 140 'root_path': os.environ.get('SCRIPT_NAME', ''), 141 'server': sock.getsockname() 142 } 143 self.base_channels = { 144 'reply': ReplyChannel(self) 145 } 146 147 self.transport = None 148 self.message = None 149 self.channels = None 150 self.headers = None 151 self.upgrade = None 152 153 self.read_paused = False 154 self.write_paused = False 155 156 self.buffer_size = 0 157 self.high_water_limit = HIGH_WATER_LIMIT 158 self.low_water_limit = LOW_WATER_LIMIT 159 160 self.has_active_request = False 161 self.max_pipelined_requests = MAX_PIPELINED_REQUESTS 162 self.pipeline_queue = collections.deque() 163 164 # The asyncio.Protocol hooks... 165 def connection_made(self, transport): 166 self.transport = transport 167 168 def connection_lost(self, exc): 169 self.transport = None 170 171 def eof_received(self): 172 pass 173 174 def data_received(self, data): 175 try: 176 self.request_parser.feed_data(data) 177 except httptools.HttpParserUpgrade: 178 websocket_upgrade(self) 179 180 # Flow control... 181 def pause_writing(self): 182 self.write_paused = True 183 184 def resume_writing(self): 185 self.write_paused = False 186 187 def check_pause_reading(self): 188 if self.transport is None or self.read_paused: 189 return 190 if (self.buffer_size > self.high_water_limit or 191 len(self.pipeline_queue) >= self.max_pipelined_requests): 192 self.transport.pause_reading() 193 self.read_paused = True 194 195 def check_resume_reading(self): 196 if self.transport is None or not self.read_paused: 197 return 198 if (self.buffer_size < self.low_water_limit and 199 len(self.pipeline_queue) < self.max_pipelined_requests): 200 self.transport.resume_reading() 201 self.read_paused = False 202 203 # Event hooks called back into by HttpRequestParser... 
204 def on_message_begin(self): 205 self.message = self.base_message.copy() 206 self.channels = self.base_channels.copy() 207 self.headers = [] 208 209 def on_url(self, url): 210 parsed = httptools.parse_url(url) 211 method = self.request_parser.get_method() 212 http_version = self.request_parser.get_http_version() 213 self.message.update({ 214 'http_version': http_version, 215 'method': method.decode('ascii'), 216 'path': parsed.path.decode('ascii'), 217 'query_string': parsed.query if parsed.query else b'', 218 'headers': self.headers 219 }) 220 221 def on_header(self, name: bytes, value: bytes): 222 name = name.lower() 223 if name == b'upgrade': 224 self.upgrade = value 225 elif name == b'expect' and value.lower() == b'100-continue': 226 self.transport.write(b'HTTP/1.1 100 Continue\r\n\r\n') 227 self.headers.append([name, value]) 228 229 def on_body(self, body: bytes): 230 if 'body' not in self.channels: 231 self.channels['body'] = BodyChannel(self) 232 if not self.has_active_request: 233 self.loop.create_task(self.consumer(self.message, self.channels)) 234 self.has_active_request = True 235 else: 236 self.pipeline_queue.append((self.message, self.channels)) 237 self.check_pause_reading() 238 message = { 239 'content': body, 240 'more_content': True 241 } 242 self.channels['body']._put(message) 243 244 def on_message_complete(self): 245 if self.upgrade is not None: 246 return 247 248 if 'body' not in self.channels: 249 if not self.has_active_request: 250 self.loop.create_task(self.consumer(self.message, self.channels)) 251 self.has_active_request = True 252 else: 253 self.pipeline_queue.append((self.message, self.channels)) 254 self.check_pause_reading() 255 else: 256 message = { 257 'content': b'', 258 'more_content': False 259 } 260 self.channels['body']._put(message) 261 262 def on_chunk_header(self): 263 pass 264 265 def on_chunk_complete(self): 266 pass 267 [end of uvicorn/protocols/http.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/protocols/http.py b/uvicorn/protocols/http.py --- a/uvicorn/protocols/http.py +++ b/uvicorn/protocols/http.py @@ -65,10 +65,11 @@ class ReplyChannel(object): - __slots__ = ['_protocol', 'name'] + __slots__ = ['_protocol', '_use_chunked_encoding', 'name'] def __init__(self, protocol): self._protocol = protocol + self._use_chunked_encoding = False self.name = 'reply:%d' % id(self) async def send(self, message): @@ -82,8 +83,8 @@ await transport.drain() status = message.get('status') - headers = message.get('headers') - content = message.get('content') + headers = message.get('headers', []) + content = message.get('content', b'') more_content = message.get('more_content', False) if status is not None: @@ -92,24 +93,37 @@ SERVER_HEADER, DATE_HEADER, ] - transport.write(b''.join(response)) - - if headers is not None: - response = [] - if content is not None and not more_content: - response = [b'content-length: ', str(len(content)).encode(), b'\r\n'] + seen_content_length = False for header_name, header_value in headers: + if header_name.lower() == b'content-length': + seen_content_length = True response.extend([header_name, b': ', header_value, b'\r\n']) - response.append(b'\r\n') + if not seen_content_length: + if more_content: + self._use_chunked_encoding = True + response.append(b'transfer-encoding: chunked\r\n') + elif status != 204: + response.append(b'content-length: ', str(len(content)).encode(), b'\r\n') + + response.append(b'\r\n') transport.write(b''.join(response)) - if content is not None: - transport.write(content) + if content: + if self._use_chunked_encoding: + transport.write(b'%x\r\n' % len(content)) + transport.write(content) + transport.write(b'\r\n') + else: + transport.write(content) if not more_content: - if (not status) or (not self._protocol.request_parser.should_keep_alive()): + if self._use_chunked_encoding: + transport.write(b'0\r\n\r\n') + self._use_chunked_encoding = False + + if not self._protocol.request_parser.should_keep_alive(): transport.close() elif protocol.pipeline_queue: message, channels = protocol.pipeline_queue.popleft() @@ -258,9 +272,3 @@ 'more_content': False } self.channels['body']._put(message) - - def on_chunk_header(self): - pass - - def on_chunk_complete(self): - pass
{"golden_diff": "diff --git a/uvicorn/protocols/http.py b/uvicorn/protocols/http.py\n--- a/uvicorn/protocols/http.py\n+++ b/uvicorn/protocols/http.py\n@@ -65,10 +65,11 @@\n \n \n class ReplyChannel(object):\n- __slots__ = ['_protocol', 'name']\n+ __slots__ = ['_protocol', '_use_chunked_encoding', 'name']\n \n def __init__(self, protocol):\n self._protocol = protocol\n+ self._use_chunked_encoding = False\n self.name = 'reply:%d' % id(self)\n \n async def send(self, message):\n@@ -82,8 +83,8 @@\n await transport.drain()\n \n status = message.get('status')\n- headers = message.get('headers')\n- content = message.get('content')\n+ headers = message.get('headers', [])\n+ content = message.get('content', b'')\n more_content = message.get('more_content', False)\n \n if status is not None:\n@@ -92,24 +93,37 @@\n SERVER_HEADER,\n DATE_HEADER,\n ]\n- transport.write(b''.join(response))\n-\n- if headers is not None:\n- response = []\n- if content is not None and not more_content:\n- response = [b'content-length: ', str(len(content)).encode(), b'\\r\\n']\n \n+ seen_content_length = False\n for header_name, header_value in headers:\n+ if header_name.lower() == b'content-length':\n+ seen_content_length = True\n response.extend([header_name, b': ', header_value, b'\\r\\n'])\n- response.append(b'\\r\\n')\n \n+ if not seen_content_length:\n+ if more_content:\n+ self._use_chunked_encoding = True\n+ response.append(b'transfer-encoding: chunked\\r\\n')\n+ elif status != 204:\n+ response.append(b'content-length: ', str(len(content)).encode(), b'\\r\\n')\n+\n+ response.append(b'\\r\\n')\n transport.write(b''.join(response))\n \n- if content is not None:\n- transport.write(content)\n+ if content:\n+ if self._use_chunked_encoding:\n+ transport.write(b'%x\\r\\n' % len(content))\n+ transport.write(content)\n+ transport.write(b'\\r\\n')\n+ else:\n+ transport.write(content)\n \n if not more_content:\n- if (not status) or (not self._protocol.request_parser.should_keep_alive()):\n+ if self._use_chunked_encoding:\n+ transport.write(b'0\\r\\n\\r\\n')\n+ self._use_chunked_encoding = False\n+\n+ if not self._protocol.request_parser.should_keep_alive():\n transport.close()\n elif protocol.pipeline_queue:\n message, channels = protocol.pipeline_queue.popleft()\n@@ -258,9 +272,3 @@\n 'more_content': False\n }\n self.channels['body']._put(message)\n-\n- def on_chunk_header(self):\n- pass\n-\n- def on_chunk_complete(self):\n- pass\n", "issue": "Transfer-Encoding: Chunked\nResponses need to support `Transfer-Encoding: Chunked`\n", "before_files": [{"content": "import asyncio\nimport collections\nimport email\nimport http\nimport httptools\nimport os\nimport time\n\nfrom uvicorn.protocols.websocket import websocket_upgrade\n\n\ndef set_time_and_date():\n global CURRENT_TIME\n global DATE_HEADER\n\n CURRENT_TIME = time.time()\n DATE_HEADER = b''.join([\n b'date: ',\n email.utils.formatdate(CURRENT_TIME, usegmt=True).encode(),\n b'\\r\\n'\n ])\n\n\ndef get_status_line(status_code):\n try:\n phrase = http.HTTPStatus(status_code).phrase.encode()\n except ValueError:\n phrase = b''\n return b''.join([\n b'HTTP/1.1 ', str(status_code).encode(), b' ', phrase, b'\\r\\n'\n ])\n\n\nCURRENT_TIME = 0.0\nDATE_HEADER = b''\nSERVER_HEADER = b'server: uvicorn\\r\\n'\nSTATUS_LINE = {\n status_code: get_status_line(status_code) for status_code in range(100, 600)\n}\n\nLOW_WATER_LIMIT = 16384\nHIGH_WATER_LIMIT = 65536\nMAX_PIPELINED_REQUESTS = 20\n\nset_time_and_date()\n\n\nclass BodyChannel(object):\n __slots__ = ['_queue', '_protocol', 
'name']\n\n def __init__(self, protocol):\n self._queue = asyncio.Queue()\n self._protocol = protocol\n self.name = 'body:%d' % id(self)\n\n def _put(self, message):\n self._queue.put_nowait(message)\n self._protocol.buffer_size += len(message['content'])\n self._protocol.check_pause_reading()\n\n async def receive(self):\n message = await self._queue.get()\n self._protocol.buffer_size -= len(message['content'])\n self._protocol.check_resume_reading()\n\n\nclass ReplyChannel(object):\n __slots__ = ['_protocol', 'name']\n\n def __init__(self, protocol):\n self._protocol = protocol\n self.name = 'reply:%d' % id(self)\n\n async def send(self, message):\n protocol = self._protocol\n transport = protocol.transport\n\n if transport is None:\n return\n\n if protocol.write_paused:\n await transport.drain()\n\n status = message.get('status')\n headers = message.get('headers')\n content = message.get('content')\n more_content = message.get('more_content', False)\n\n if status is not None:\n response = [\n STATUS_LINE[status],\n SERVER_HEADER,\n DATE_HEADER,\n ]\n transport.write(b''.join(response))\n\n if headers is not None:\n response = []\n if content is not None and not more_content:\n response = [b'content-length: ', str(len(content)).encode(), b'\\r\\n']\n\n for header_name, header_value in headers:\n response.extend([header_name, b': ', header_value, b'\\r\\n'])\n response.append(b'\\r\\n')\n\n transport.write(b''.join(response))\n\n if content is not None:\n transport.write(content)\n\n if not more_content:\n if (not status) or (not self._protocol.request_parser.should_keep_alive()):\n transport.close()\n elif protocol.pipeline_queue:\n message, channels = protocol.pipeline_queue.popleft()\n protocol.loop.create_task(protocol.consumer(message, channels))\n protocol.check_resume_reading()\n else:\n protocol.has_active_request = False\n\n\nclass HttpProtocol(asyncio.Protocol):\n __slots__ = [\n 'consumer', 'loop', 'request_parser',\n 'base_message', 'base_channels',\n 'transport', 'message', 'channels', 'headers', 'upgrade',\n 'read_paused', 'write_paused',\n 'buffer_size', 'high_water_limit', 'low_water_limit',\n 'has_active_request', 'max_pipelined_requests', 'pipeline_queue'\n ]\n\n def __init__(self, consumer, loop, sock, cfg):\n self.consumer = consumer\n self.loop = loop\n self.request_parser = httptools.HttpRequestParser(self)\n\n self.base_message = {\n 'channel': 'http.request',\n 'scheme': 'https' if cfg.is_ssl else 'http',\n 'root_path': os.environ.get('SCRIPT_NAME', ''),\n 'server': sock.getsockname()\n }\n self.base_channels = {\n 'reply': ReplyChannel(self)\n }\n\n self.transport = None\n self.message = None\n self.channels = None\n self.headers = None\n self.upgrade = None\n\n self.read_paused = False\n self.write_paused = False\n\n self.buffer_size = 0\n self.high_water_limit = HIGH_WATER_LIMIT\n self.low_water_limit = LOW_WATER_LIMIT\n\n self.has_active_request = False\n self.max_pipelined_requests = MAX_PIPELINED_REQUESTS\n self.pipeline_queue = collections.deque()\n\n # The asyncio.Protocol hooks...\n def connection_made(self, transport):\n self.transport = transport\n\n def connection_lost(self, exc):\n self.transport = None\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n try:\n self.request_parser.feed_data(data)\n except httptools.HttpParserUpgrade:\n websocket_upgrade(self)\n\n # Flow control...\n def pause_writing(self):\n self.write_paused = True\n\n def resume_writing(self):\n self.write_paused = False\n\n def check_pause_reading(self):\n if 
self.transport is None or self.read_paused:\n return\n if (self.buffer_size > self.high_water_limit or\n len(self.pipeline_queue) >= self.max_pipelined_requests):\n self.transport.pause_reading()\n self.read_paused = True\n\n def check_resume_reading(self):\n if self.transport is None or not self.read_paused:\n return\n if (self.buffer_size < self.low_water_limit and\n len(self.pipeline_queue) < self.max_pipelined_requests):\n self.transport.resume_reading()\n self.read_paused = False\n\n # Event hooks called back into by HttpRequestParser...\n def on_message_begin(self):\n self.message = self.base_message.copy()\n self.channels = self.base_channels.copy()\n self.headers = []\n\n def on_url(self, url):\n parsed = httptools.parse_url(url)\n method = self.request_parser.get_method()\n http_version = self.request_parser.get_http_version()\n self.message.update({\n 'http_version': http_version,\n 'method': method.decode('ascii'),\n 'path': parsed.path.decode('ascii'),\n 'query_string': parsed.query if parsed.query else b'',\n 'headers': self.headers\n })\n\n def on_header(self, name: bytes, value: bytes):\n name = name.lower()\n if name == b'upgrade':\n self.upgrade = value\n elif name == b'expect' and value.lower() == b'100-continue':\n self.transport.write(b'HTTP/1.1 100 Continue\\r\\n\\r\\n')\n self.headers.append([name, value])\n\n def on_body(self, body: bytes):\n if 'body' not in self.channels:\n self.channels['body'] = BodyChannel(self)\n if not self.has_active_request:\n self.loop.create_task(self.consumer(self.message, self.channels))\n self.has_active_request = True\n else:\n self.pipeline_queue.append((self.message, self.channels))\n self.check_pause_reading()\n message = {\n 'content': body,\n 'more_content': True\n }\n self.channels['body']._put(message)\n\n def on_message_complete(self):\n if self.upgrade is not None:\n return\n\n if 'body' not in self.channels:\n if not self.has_active_request:\n self.loop.create_task(self.consumer(self.message, self.channels))\n self.has_active_request = True\n else:\n self.pipeline_queue.append((self.message, self.channels))\n self.check_pause_reading()\n else:\n message = {\n 'content': b'',\n 'more_content': False\n }\n self.channels['body']._put(message)\n\n def on_chunk_header(self):\n pass\n\n def on_chunk_complete(self):\n pass\n", "path": "uvicorn/protocols/http.py"}]}
3,022
690
gh_patches_debug_38061
rasdani/github-patches
git_diff
onnx__onnx-5693
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature request] Expose lexical scope context in Python checker ### System information Latest ### What is the problem that this feature solves? Currently lexical scope context is not exposed in Python onnx.checker. ### Alternatives considered _No response_ ### Describe the feature Follow up of https://github.com/onnx/onnx/pull/4720. Expose lexical scope context in Python onnx.checker. See https://github.com/onnx/onnx/blob/3747442528c820ab8dd41111ef3e9ab1a4da6062/onnx/cpp2py_export.cc#L378 ### Will this influence the current api (Y/N)? Y. Extended parameters will be added. ### Feature Area checker ### Are you willing to contribute it (Y/N) Yes ### Notes _No response_ </issue> <code> [start of onnx/checker.py] 1 # Copyright (c) ONNX Project Contributors 2 # 3 # SPDX-License-Identifier: Apache-2.0 4 """Graph utilities for checking whether an ONNX proto message is legal.""" 5 6 from __future__ import annotations 7 8 __all__ = [ 9 "check_attribute", 10 "check_function", 11 "check_graph", 12 "check_model", 13 "check_node", 14 "check_sparse_tensor", 15 "check_tensor", 16 "check_value_info", 17 "DEFAULT_CONTEXT", 18 "ValidationError", 19 "C", 20 "MAXIMUM_PROTOBUF", 21 ] 22 23 import os 24 import sys 25 from typing import Any, Callable, TypeVar 26 27 from google.protobuf.message import Message 28 29 import onnx.defs 30 import onnx.onnx_cpp2py_export.checker as C # noqa: N812 31 import onnx.shape_inference 32 from onnx import ( 33 IR_VERSION, 34 AttributeProto, 35 FunctionProto, 36 GraphProto, 37 ModelProto, 38 NodeProto, 39 SparseTensorProto, 40 TensorProto, 41 ValueInfoProto, 42 helper, 43 ) 44 45 # Limitation of single protobuf file is 2GB 46 MAXIMUM_PROTOBUF = 2000000000 47 48 # TODO: This thing where we reserialize the protobuf back into the 49 # string, only to deserialize it at the call site, is really goofy. 50 # Stop doing that. 51 52 53 # NB: Please don't edit this context! 54 DEFAULT_CONTEXT = C.CheckerContext() 55 DEFAULT_CONTEXT.ir_version = IR_VERSION 56 # TODO: Maybe ONNX-ML should also be defaulted? 
57 DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()} 58 59 60 FuncType = TypeVar("FuncType", bound=Callable[..., Any]) 61 62 63 def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None: 64 if not isinstance(proto, proto_type): 65 raise TypeError( 66 f"The proto message needs to be of type '{proto_type.__name__}'" 67 ) 68 69 70 def check_value_info( 71 value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 72 ) -> None: 73 _ensure_proto_type(value_info, ValueInfoProto) 74 return C.check_value_info(value_info.SerializeToString(), ctx) 75 76 77 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 78 _ensure_proto_type(tensor, TensorProto) 79 return C.check_tensor(tensor.SerializeToString(), ctx) 80 81 82 def check_attribute( 83 attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 84 ) -> None: 85 _ensure_proto_type(attr, AttributeProto) 86 return C.check_attribute(attr.SerializeToString(), ctx) 87 88 89 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 90 _ensure_proto_type(node, NodeProto) 91 return C.check_node(node.SerializeToString(), ctx) 92 93 94 def check_function( 95 function: FunctionProto, ctx: C.CheckerContext | None = None 96 ) -> None: 97 _ensure_proto_type(function, FunctionProto) 98 if ctx is None: 99 ctx = C.CheckerContext() 100 ctx.ir_version = helper.find_min_ir_version_for( 101 list(function.opset_import), True 102 ) 103 function_opset_dic = {} 104 for domain_version in function.opset_import: 105 function_opset_dic[domain_version.domain] = domain_version.version 106 ctx.opset_imports = function_opset_dic 107 C.check_function(function.SerializeToString(), ctx) 108 109 110 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 111 _ensure_proto_type(graph, GraphProto) 112 return C.check_graph(graph.SerializeToString(), ctx) 113 114 115 def check_sparse_tensor( 116 sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 117 ) -> None: 118 _ensure_proto_type(sparse, SparseTensorProto) 119 C.check_sparse_tensor(sparse.SerializeToString(), ctx) 120 121 122 def check_model( 123 model: ModelProto | str | bytes | os.PathLike, 124 full_check: bool = False, 125 skip_opset_compatibility_check: bool = False, 126 ) -> None: 127 """Check the consistency of a model. 128 129 An exception will be raised if the model's ir_version is not set 130 properly or is higher than checker's ir_version, or if the model 131 has duplicate keys in metadata_props. 132 133 If IR version >= 3, the model must specify opset_import. 134 If IR version < 3, the model cannot have any opset_import specified. 135 136 Args: 137 model: Model to check. If model is a path, the function checks model 138 path first. If the model bytes size is larger than 2GB, function 139 should be called using model path. 140 full_check: If True, the function also runs shape inference check. 141 skip_opset_compatibility_check: If True, the function skips the check for 142 opset compatibility. 
143 """ 144 # If model is a path instead of ModelProto 145 if isinstance(model, (str, os.PathLike)): 146 C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check) 147 else: 148 protobuf_string = ( 149 model if isinstance(model, bytes) else model.SerializeToString() 150 ) 151 # If the protobuf is larger than 2GB, 152 # remind users should use the model path to check 153 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF: 154 raise ValueError( 155 "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead." 156 ) 157 C.check_model(protobuf_string, full_check, skip_opset_compatibility_check) 158 159 160 ValidationError = C.ValidationError 161 [end of onnx/checker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/onnx/checker.py b/onnx/checker.py --- a/onnx/checker.py +++ b/onnx/checker.py @@ -15,6 +15,7 @@ "check_tensor", "check_value_info", "DEFAULT_CONTEXT", + "LEXICAL_SCOPE_CONTEXT", "ValidationError", "C", "MAXIMUM_PROTOBUF", @@ -39,7 +40,6 @@ SparseTensorProto, TensorProto, ValueInfoProto, - helper, ) # Limitation of single protobuf file is 2GB @@ -56,6 +56,8 @@ # TODO: Maybe ONNX-ML should also be defaulted? DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()} +LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext() + FuncType = TypeVar("FuncType", bound=Callable[..., Any]) @@ -80,36 +82,39 @@ def check_attribute( - attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT + attr: AttributeProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, ) -> None: _ensure_proto_type(attr, AttributeProto) - return C.check_attribute(attr.SerializeToString(), ctx) + return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx) -def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: +def check_node( + node: NodeProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: _ensure_proto_type(node, NodeProto) - return C.check_node(node.SerializeToString(), ctx) + return C.check_node(node.SerializeToString(), ctx, lex_ctx) def check_function( - function: FunctionProto, ctx: C.CheckerContext | None = None + function: FunctionProto, + ctx: C.CheckerContext, + lex_ctx: C.LexicalScopeContext, ) -> None: _ensure_proto_type(function, FunctionProto) - if ctx is None: - ctx = C.CheckerContext() - ctx.ir_version = helper.find_min_ir_version_for( - list(function.opset_import), True - ) - function_opset_dic = {} - for domain_version in function.opset_import: - function_opset_dic[domain_version.domain] = domain_version.version - ctx.opset_imports = function_opset_dic - C.check_function(function.SerializeToString(), ctx) + C.check_function(function.SerializeToString(), ctx, lex_ctx) -def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: +def check_graph( + graph: GraphProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: _ensure_proto_type(graph, GraphProto) - return C.check_graph(graph.SerializeToString(), ctx) + return C.check_graph(graph.SerializeToString(), ctx, lex_ctx) def check_sparse_tensor(
{"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -15,6 +15,7 @@\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n+ \"LEXICAL_SCOPE_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n@@ -39,7 +40,6 @@\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n- helper,\n )\n \n # Limitation of single protobuf file is 2GB\n@@ -56,6 +56,8 @@\n # TODO: Maybe ONNX-ML should also be defaulted?\n DEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n \n+LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()\n+\n \n FuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n \n@@ -80,36 +82,39 @@\n \n \n def check_attribute(\n- attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n+ attr: AttributeProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(attr, AttributeProto)\n- return C.check_attribute(attr.SerializeToString(), ctx)\n+ return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)\n \n \n-def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n+def check_node(\n+ node: NodeProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+) -> None:\n _ensure_proto_type(node, NodeProto)\n- return C.check_node(node.SerializeToString(), ctx)\n+ return C.check_node(node.SerializeToString(), ctx, lex_ctx)\n \n \n def check_function(\n- function: FunctionProto, ctx: C.CheckerContext | None = None\n+ function: FunctionProto,\n+ ctx: C.CheckerContext,\n+ lex_ctx: C.LexicalScopeContext,\n ) -> None:\n _ensure_proto_type(function, FunctionProto)\n- if ctx is None:\n- ctx = C.CheckerContext()\n- ctx.ir_version = helper.find_min_ir_version_for(\n- list(function.opset_import), True\n- )\n- function_opset_dic = {}\n- for domain_version in function.opset_import:\n- function_opset_dic[domain_version.domain] = domain_version.version\n- ctx.opset_imports = function_opset_dic\n- C.check_function(function.SerializeToString(), ctx)\n+ C.check_function(function.SerializeToString(), ctx, lex_ctx)\n \n \n-def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n+def check_graph(\n+ graph: GraphProto,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+) -> None:\n _ensure_proto_type(graph, GraphProto)\n- return C.check_graph(graph.SerializeToString(), ctx)\n+ return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)\n \n \n def check_sparse_tensor(\n", "issue": "[Feature request] Expose lexical scope context in Python checker\n### System information\n\nLatest\n\n### What is the problem that this feature solves?\n\nCurrently lexical scope context is not exposed in Python onnx.checker.\n\n### Alternatives considered\n\n_No response_\n\n### Describe the feature\n\nFollow up of https://github.com/onnx/onnx/pull/4720. Expose lexical scope context in Python onnx.checker. See https://github.com/onnx/onnx/blob/3747442528c820ab8dd41111ef3e9ab1a4da6062/onnx/cpp2py_export.cc#L378\n\n### Will this influence the current api (Y/N)?\n\nY. 
Extended parameters will be added.\n\n### Feature Area\n\nchecker\n\n### Are you willing to contribute it (Y/N)\n\nYes\n\n### Notes\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n helper,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx)\n\n\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx)\n\n\ndef check_function(\n function: FunctionProto, ctx: C.CheckerContext | None = None\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n if ctx is None:\n ctx = C.CheckerContext()\n ctx.ir_version = helper.find_min_ir_version_for(\n list(function.opset_import), True\n )\n function_opset_dic = {}\n for domain_version in function.opset_import:\n function_opset_dic[domain_version.domain] = domain_version.version\n ctx.opset_imports = function_opset_dic\n C.check_function(function.SerializeToString(), ctx)\n\n\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n 
model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model.\n\n An exception will be raised if the model's ir_version is not set\n properly or is higher than checker's ir_version, or if the model\n has duplicate keys in metadata_props.\n\n If IR version >= 3, the model must specify opset_import.\n If IR version < 3, the model cannot have any opset_import specified.\n\n Args:\n model: Model to check. If model is a path, the function checks model\n path first. If the model bytes size is larger than 2GB, function\n should be called using model path.\n full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]}
2,310
713
gh_patches_debug_27826
rasdani/github-patches
git_diff
qtile__qtile-3863
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bluetooth widget displays adapter name instead of name of connected device ### The issue: version: 0.21.0 log: no relevant log I configured the bluetooth-widget. When a device is connected, it shows the adapter name, instead of the device name. ### Required: - [X] I have searched past issues to see if this bug has already been reported. </issue> <code> [start of libqtile/widget/bluetooth.py] 1 # Copyright (c) 2021 Graeme Holliday 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 # SOFTWARE. 20 21 from dbus_next.aio import MessageBus 22 from dbus_next.constants import BusType 23 24 from libqtile.widget import base 25 26 BLUEZ = "org.bluez" 27 BLUEZ_PATH = "/org/bluez/hci0" 28 BLUEZ_ADAPTER = "org.bluez.Adapter1" 29 BLUEZ_DEVICE = "org.bluez.Device1" 30 BLUEZ_PROPERTIES = "org.freedesktop.DBus.Properties" 31 32 33 class Bluetooth(base._TextBox): 34 """ 35 Displays bluetooth status for a particular connected device. 36 37 (For example your bluetooth headphones.) 38 39 Uses dbus-next to communicate with the system bus. 40 41 Widget requirements: dbus-next_. 42 43 .. 
_dbus-next: https://pypi.org/project/dbus-next/ 44 """ 45 46 defaults = [ 47 ( 48 "hci", 49 "/dev_XX_XX_XX_XX_XX_XX", 50 "hci0 device path, can be found with d-feet or similar dbus explorer.", 51 ) 52 ] 53 54 def __init__(self, **config): 55 base._TextBox.__init__(self, "", **config) 56 self.add_defaults(Bluetooth.defaults) 57 58 async def _config_async(self): 59 # set initial values 60 self.powered = await self._init_adapter() 61 self.connected, self.device = await self._init_device() 62 63 self.update_text() 64 65 async def _init_adapter(self): 66 # set up interface to adapter properties using high-level api 67 bus = await MessageBus(bus_type=BusType.SYSTEM).connect() 68 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH) 69 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect) 70 iface = obj.get_interface(BLUEZ_ADAPTER) 71 props = obj.get_interface(BLUEZ_PROPERTIES) 72 73 powered = await iface.get_powered() 74 # subscribe receiver to property changed 75 props.on_properties_changed(self._signal_received) 76 return powered 77 78 async def _init_device(self): 79 # set up interface to device properties using high-level api 80 bus = await MessageBus(bus_type=BusType.SYSTEM).connect() 81 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci) 82 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect) 83 iface = obj.get_interface(BLUEZ_DEVICE) 84 props = obj.get_interface(BLUEZ_PROPERTIES) 85 86 connected = await iface.get_connected() 87 name = await iface.get_name() 88 # subscribe receiver to property changed 89 props.on_properties_changed(self._signal_received) 90 return connected, name 91 92 def _signal_received(self, interface_name, changed_properties, _invalidated_properties): 93 powered = changed_properties.get("Powered", None) 94 if powered is not None: 95 self.powered = powered.value 96 self.update_text() 97 98 connected = changed_properties.get("Connected", None) 99 if connected is not None: 100 self.connected = connected.value 101 self.update_text() 102 103 device = changed_properties.get("Name", None) 104 if device is not None: 105 self.device = device.value 106 self.update_text() 107 108 def update_text(self): 109 text = "" 110 if not self.powered: 111 text = "off" 112 else: 113 if not self.connected: 114 text = "on" 115 else: 116 text = self.device 117 self.update(text) 118 [end of libqtile/widget/bluetooth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py --- a/libqtile/widget/bluetooth.py +++ b/libqtile/widget/bluetooth.py @@ -72,7 +72,7 @@ powered = await iface.get_powered() # subscribe receiver to property changed - props.on_properties_changed(self._signal_received) + props.on_properties_changed(self._adapter_signal_received) return powered async def _init_device(self): @@ -86,15 +86,20 @@ connected = await iface.get_connected() name = await iface.get_name() # subscribe receiver to property changed - props.on_properties_changed(self._signal_received) + props.on_properties_changed(self._device_signal_received) return connected, name - def _signal_received(self, interface_name, changed_properties, _invalidated_properties): + def _adapter_signal_received( + self, interface_name, changed_properties, _invalidated_properties + ): powered = changed_properties.get("Powered", None) if powered is not None: self.powered = powered.value self.update_text() + def _device_signal_received( + self, interface_name, changed_properties, _invalidated_properties + ): connected = changed_properties.get("Connected", None) if connected is not None: self.connected = connected.value
{"golden_diff": "diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py\n--- a/libqtile/widget/bluetooth.py\n+++ b/libqtile/widget/bluetooth.py\n@@ -72,7 +72,7 @@\n \n powered = await iface.get_powered()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._adapter_signal_received)\n return powered\n \n async def _init_device(self):\n@@ -86,15 +86,20 @@\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._device_signal_received)\n return connected, name\n \n- def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n+ def _adapter_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n \n+ def _device_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n", "issue": "Bluetooth widget displays adapter name instead of name of connected device\n### The issue:\n\nversion: 0.21.0\r\nlog: no relevant log\r\n\r\nI configured the bluetooth-widget.\r\nWhen a device is connected, it shows the adapter name, instead of the device name.\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2021 Graeme Holliday\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom dbus_next.aio import MessageBus\nfrom dbus_next.constants import BusType\n\nfrom libqtile.widget import base\n\nBLUEZ = \"org.bluez\"\nBLUEZ_PATH = \"/org/bluez/hci0\"\nBLUEZ_ADAPTER = \"org.bluez.Adapter1\"\nBLUEZ_DEVICE = \"org.bluez.Device1\"\nBLUEZ_PROPERTIES = \"org.freedesktop.DBus.Properties\"\n\n\nclass Bluetooth(base._TextBox):\n \"\"\"\n Displays bluetooth status for a particular connected device.\n\n (For example your bluetooth headphones.)\n\n Uses dbus-next to communicate with the system bus.\n\n Widget requirements: dbus-next_.\n\n .. 
_dbus-next: https://pypi.org/project/dbus-next/\n \"\"\"\n\n defaults = [\n (\n \"hci\",\n \"/dev_XX_XX_XX_XX_XX_XX\",\n \"hci0 device path, can be found with d-feet or similar dbus explorer.\",\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(Bluetooth.defaults)\n\n async def _config_async(self):\n # set initial values\n self.powered = await self._init_adapter()\n self.connected, self.device = await self._init_device()\n\n self.update_text()\n\n async def _init_adapter(self):\n # set up interface to adapter properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)\n iface = obj.get_interface(BLUEZ_ADAPTER)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n powered = await iface.get_powered()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return powered\n\n async def _init_device(self):\n # set up interface to device properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)\n iface = obj.get_interface(BLUEZ_DEVICE)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return connected, name\n\n def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n self.update_text()\n\n device = changed_properties.get(\"Name\", None)\n if device is not None:\n self.device = device.value\n self.update_text()\n\n def update_text(self):\n text = \"\"\n if not self.powered:\n text = \"off\"\n else:\n if not self.connected:\n text = \"on\"\n else:\n text = self.device\n self.update(text)\n", "path": "libqtile/widget/bluetooth.py"}]}
1,819
300
gh_patches_debug_5575
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-3047
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add OpenAPI Specification for /databases/ endpoint ## Problem In order to ensure the accuracy of the specs generated by drf-spectacular for /databases/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. ## Proposed solution * Implement custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint.The hook function selectively retains only the endpoint paths that match the /datafiles/ * Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process. * Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command </issue> <code> [start of config/settings/openapi.py] 1 def custom_preprocessing_hook(endpoints): 2 filtered = [] 3 for (path, path_regex, method, callback) in endpoints: 4 # Remove all but DRF API endpoints 5 if path.startswith("/api/db/v0/data_files/"): 6 filtered.append((path, path_regex, method, callback)) 7 return filtered 8 9 10 def remove_url_prefix_hook(result, **kwargs): 11 # Remove namespace and version URL prefix from the operation Id of the generated API schema 12 for path, path_info in result['paths'].items(): 13 for method, operation in path_info.items(): 14 operation_id = operation.get('operationId') 15 if operation_id: 16 if path.startswith('/api/db/v0/'): 17 operation['operationId'] = operation_id.replace('db_v0_', '') 18 elif path.startswith('/api/ui/v0/'): 19 operation['operationId'] = operation_id.replace('ui_v0_', '') 20 21 return result 22 [end of config/settings/openapi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config/settings/openapi.py b/config/settings/openapi.py --- a/config/settings/openapi.py +++ b/config/settings/openapi.py @@ -2,7 +2,7 @@ filtered = [] for (path, path_regex, method, callback) in endpoints: # Remove all but DRF API endpoints - if path.startswith("/api/db/v0/data_files/"): + if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"): filtered.append((path, path_regex, method, callback)) return filtered
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/data_files/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /databases/ endpoint \n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /databases/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint.The hook function selectively retains only the endpoint paths that match the /datafiles/ \r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\r\n\r\n\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
939
127
gh_patches_debug_9178
rasdani/github-patches
git_diff
getmoto__moto-106
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dynamo query with hash key returns wrong results When executing a query in dynamodb wrong result is returned Step to reproduce: add an insert to the default query_test in the unit tests: ``` @mock_dynamodb def test_query(): conn = boto.connect_dynamodb() table = create_table(conn) item_data = { 'Body': 'http://url_to_lolcat.gif', 'SentBy': 'User A', 'ReceivedTime': '12/9/2011 11:36:03 PM', } # the extra item item = table.new_item( hash_key='the-key1', range_key='4561', attrs=item_data, ) item.put() item = table.new_item( hash_key='the-key', range_key='456', attrs=item_data, ) item.put() item = table.new_item( hash_key='the-key', range_key='123', attrs=item_data, ) item.put() item = table.new_item( hash_key='the-key', range_key='789', attrs=item_data, ) item.put() results = table.query(hash_key='the-key', range_key_condition=condition.GT('1')) results.response['Items'].should.have.length_of(3) ``` This new test will fail as the library will return 4 results in stead of 3 </issue> <code> [start of moto/dynamodb/models.py] 1 from collections import defaultdict 2 import datetime 3 import json 4 5 try: 6 from collections import OrderedDict 7 except ImportError: 8 # python 2.6 or earlier, use backport 9 from ordereddict import OrderedDict 10 11 12 from moto.core import BaseBackend 13 from .comparisons import get_comparison_func 14 from .utils import unix_time 15 16 17 class DynamoJsonEncoder(json.JSONEncoder): 18 def default(self, obj): 19 if hasattr(obj, 'to_json'): 20 return obj.to_json() 21 22 23 def dynamo_json_dump(dynamo_object): 24 return json.dumps(dynamo_object, cls=DynamoJsonEncoder) 25 26 27 class DynamoType(object): 28 """ 29 http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes 30 """ 31 32 def __init__(self, type_as_dict): 33 self.type = type_as_dict.keys()[0] 34 self.value = type_as_dict.values()[0] 35 36 def __hash__(self): 37 return hash((self.type, self.value)) 38 39 def __eq__(self, other): 40 return ( 41 self.type == other.type and 42 self.value == other.value 43 ) 44 45 def __repr__(self): 46 return "DynamoType: {0}".format(self.to_json()) 47 48 def to_json(self): 49 return {self.type: self.value} 50 51 def compare(self, range_comparison, range_objs): 52 """ 53 Compares this type against comparison filters 54 """ 55 range_values = [obj.value for obj in range_objs] 56 comparison_func = get_comparison_func(range_comparison) 57 return comparison_func(self.value, *range_values) 58 59 60 class Item(object): 61 def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): 62 self.hash_key = hash_key 63 self.hash_key_type = hash_key_type 64 self.range_key = range_key 65 self.range_key_type = range_key_type 66 67 self.attrs = {} 68 for key, value in attrs.iteritems(): 69 self.attrs[key] = DynamoType(value) 70 71 def __repr__(self): 72 return "Item: {0}".format(self.to_json()) 73 74 def to_json(self): 75 attributes = {} 76 for attribute_key, attribute in self.attrs.iteritems(): 77 attributes[attribute_key] = attribute.value 78 79 return { 80 "Attributes": attributes 81 } 82 83 def describe_attrs(self, attributes): 84 if attributes: 85 included = {} 86 for key, value in self.attrs.iteritems(): 87 if key in attributes: 88 included[key] = value 89 else: 90 included = self.attrs 91 return { 92 "Item": included 93 } 94 95 96 class Table(object): 97 98 def __init__(self, name, hash_key_attr, hash_key_type, 99 
range_key_attr=None, range_key_type=None, read_capacity=None, 100 write_capacity=None): 101 self.name = name 102 self.hash_key_attr = hash_key_attr 103 self.hash_key_type = hash_key_type 104 self.range_key_attr = range_key_attr 105 self.range_key_type = range_key_type 106 self.read_capacity = read_capacity 107 self.write_capacity = write_capacity 108 self.created_at = datetime.datetime.now() 109 self.items = defaultdict(dict) 110 111 @property 112 def has_range_key(self): 113 return self.range_key_attr is not None 114 115 @property 116 def describe(self): 117 results = { 118 "Table": { 119 "CreationDateTime": unix_time(self.created_at), 120 "KeySchema": { 121 "HashKeyElement": { 122 "AttributeName": self.hash_key_attr, 123 "AttributeType": self.hash_key_type 124 }, 125 }, 126 "ProvisionedThroughput": { 127 "ReadCapacityUnits": self.read_capacity, 128 "WriteCapacityUnits": self.write_capacity 129 }, 130 "TableName": self.name, 131 "TableStatus": "ACTIVE", 132 "ItemCount": len(self), 133 "TableSizeBytes": 0, 134 } 135 } 136 if self.has_range_key: 137 results["Table"]["KeySchema"]["RangeKeyElement"] = { 138 "AttributeName": self.range_key_attr, 139 "AttributeType": self.range_key_type 140 } 141 return results 142 143 def __len__(self): 144 count = 0 145 for key, value in self.items.iteritems(): 146 if self.has_range_key: 147 count += len(value) 148 else: 149 count += 1 150 return count 151 152 def __nonzero__(self): 153 return True 154 155 def put_item(self, item_attrs): 156 hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) 157 if self.has_range_key: 158 range_value = DynamoType(item_attrs.get(self.range_key_attr)) 159 else: 160 range_value = None 161 162 item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs) 163 164 if range_value: 165 self.items[hash_value][range_value] = item 166 else: 167 self.items[hash_value] = item 168 return item 169 170 def get_item(self, hash_key, range_key): 171 if self.has_range_key and not range_key: 172 raise ValueError("Table has a range key, but no range key was passed into get_item") 173 try: 174 if range_key: 175 return self.items[hash_key][range_key] 176 else: 177 return self.items[hash_key] 178 except KeyError: 179 return None 180 181 def query(self, hash_key, range_comparison, range_objs): 182 results = [] 183 last_page = True # Once pagination is implemented, change this 184 185 possible_results = list(self.all_items()) 186 if range_comparison: 187 for result in possible_results: 188 if result.range_key.compare(range_comparison, range_objs): 189 results.append(result) 190 else: 191 # If we're not filtering on range key, return all values 192 results = possible_results 193 return results, last_page 194 195 def all_items(self): 196 for hash_set in self.items.values(): 197 if self.range_key_attr: 198 for item in hash_set.values(): 199 yield item 200 else: 201 yield hash_set 202 203 def scan(self, filters): 204 results = [] 205 scanned_count = 0 206 last_page = True # Once pagination is implemented, change this 207 208 for result in self.all_items(): 209 scanned_count += 1 210 passes_all_conditions = True 211 for attribute_name, (comparison_operator, comparison_objs) in filters.iteritems(): 212 attribute = result.attrs.get(attribute_name) 213 214 if attribute: 215 # Attribute found 216 if not attribute.compare(comparison_operator, comparison_objs): 217 passes_all_conditions = False 218 break 219 elif comparison_operator == 'NULL': 220 # Comparison is NULL and we don't have the attribute 221 continue 222 else: 223 
# No attribute found and comparison is no NULL. This item fails 224 passes_all_conditions = False 225 break 226 227 if passes_all_conditions: 228 results.append(result) 229 230 return results, scanned_count, last_page 231 232 def delete_item(self, hash_key, range_key): 233 try: 234 if range_key: 235 return self.items[hash_key].pop(range_key) 236 else: 237 return self.items.pop(hash_key) 238 except KeyError: 239 return None 240 241 242 class DynamoDBBackend(BaseBackend): 243 244 def __init__(self): 245 self.tables = OrderedDict() 246 247 def create_table(self, name, **params): 248 table = Table(name, **params) 249 self.tables[name] = table 250 return table 251 252 def delete_table(self, name): 253 return self.tables.pop(name, None) 254 255 def update_table_throughput(self, name, new_read_units, new_write_units): 256 table = self.tables[name] 257 table.read_capacity = new_read_units 258 table.write_capacity = new_write_units 259 return table 260 261 def put_item(self, table_name, item_attrs): 262 table = self.tables.get(table_name) 263 if not table: 264 return None 265 266 return table.put_item(item_attrs) 267 268 def get_item(self, table_name, hash_key_dict, range_key_dict): 269 table = self.tables.get(table_name) 270 if not table: 271 return None 272 273 hash_key = DynamoType(hash_key_dict) 274 range_key = DynamoType(range_key_dict) if range_key_dict else None 275 276 return table.get_item(hash_key, range_key) 277 278 def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts): 279 table = self.tables.get(table_name) 280 if not table: 281 return None, None 282 283 hash_key = DynamoType(hash_key_dict) 284 range_values = [DynamoType(range_value) for range_value in range_value_dicts] 285 286 return table.query(hash_key, range_comparison, range_values) 287 288 def scan(self, table_name, filters): 289 table = self.tables.get(table_name) 290 if not table: 291 return None, None, None 292 293 scan_filters = {} 294 for key, (comparison_operator, comparison_values) in filters.iteritems(): 295 dynamo_types = [DynamoType(value) for value in comparison_values] 296 scan_filters[key] = (comparison_operator, dynamo_types) 297 298 return table.scan(scan_filters) 299 300 def delete_item(self, table_name, hash_key_dict, range_key_dict): 301 table = self.tables.get(table_name) 302 if not table: 303 return None 304 305 hash_key = DynamoType(hash_key_dict) 306 range_key = DynamoType(range_key_dict) if range_key_dict else None 307 308 return table.delete_item(hash_key, range_key) 309 310 311 dynamodb_backend = DynamoDBBackend() 312 [end of moto/dynamodb/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -182,7 +182,11 @@ results = [] last_page = True # Once pagination is implemented, change this - possible_results = list(self.all_items()) + if self.range_key_attr: + possible_results = self.items[hash_key].values() + else: + possible_results = list(self.all_items()) + if range_comparison: for result in possible_results: if result.range_key.compare(range_comparison, range_objs):
{"golden_diff": "diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py\n--- a/moto/dynamodb/models.py\n+++ b/moto/dynamodb/models.py\n@@ -182,7 +182,11 @@\n results = []\n last_page = True # Once pagination is implemented, change this\n \n- possible_results = list(self.all_items())\n+ if self.range_key_attr:\n+ possible_results = self.items[hash_key].values()\n+ else:\n+ possible_results = list(self.all_items())\n+\n if range_comparison:\n for result in possible_results:\n if result.range_key.compare(range_comparison, range_objs):\n", "issue": "dynamo query with hash key returns wrong results\nWhen executing a query in dynamodb wrong result is returned\n\nStep to reproduce:\nadd an insert to the default query_test in the unit tests:\n\n```\n@mock_dynamodb\ndef test_query():\n conn = boto.connect_dynamodb()\n table = create_table(conn)\n\n item_data = {\n 'Body': 'http://url_to_lolcat.gif',\n 'SentBy': 'User A',\n 'ReceivedTime': '12/9/2011 11:36:03 PM',\n }\n # the extra item\n item = table.new_item(\n hash_key='the-key1',\n range_key='4561',\n attrs=item_data,\n )\n item.put()\n\n item = table.new_item(\n hash_key='the-key',\n range_key='456',\n attrs=item_data,\n )\n item.put()\n\n item = table.new_item(\n hash_key='the-key',\n range_key='123',\n attrs=item_data,\n )\n item.put()\n\n item = table.new_item(\n hash_key='the-key',\n range_key='789',\n attrs=item_data,\n )\n item.put()\n\n results = table.query(hash_key='the-key', range_key_condition=condition.GT('1'))\n results.response['Items'].should.have.length_of(3)\n```\n\nThis new test will fail as the library will return 4 results in stead of 3\n\n", "before_files": [{"content": "from collections import defaultdict\nimport datetime\nimport json\n\ntry:\n from collections import OrderedDict\nexcept ImportError:\n # python 2.6 or earlier, use backport\n from ordereddict import OrderedDict\n\n\nfrom moto.core import BaseBackend\nfrom .comparisons import get_comparison_func\nfrom .utils import unix_time\n\n\nclass DynamoJsonEncoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj, 'to_json'):\n return obj.to_json()\n\n\ndef dynamo_json_dump(dynamo_object):\n return json.dumps(dynamo_object, cls=DynamoJsonEncoder)\n\n\nclass DynamoType(object):\n \"\"\"\n http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes\n \"\"\"\n\n def __init__(self, type_as_dict):\n self.type = type_as_dict.keys()[0]\n self.value = type_as_dict.values()[0]\n\n def __hash__(self):\n return hash((self.type, self.value))\n\n def __eq__(self, other):\n return (\n self.type == other.type and\n self.value == other.value\n )\n\n def __repr__(self):\n return \"DynamoType: {0}\".format(self.to_json())\n\n def to_json(self):\n return {self.type: self.value}\n\n def compare(self, range_comparison, range_objs):\n \"\"\"\n Compares this type against comparison filters\n \"\"\"\n range_values = [obj.value for obj in range_objs]\n comparison_func = get_comparison_func(range_comparison)\n return comparison_func(self.value, *range_values)\n\n\nclass Item(object):\n def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):\n self.hash_key = hash_key\n self.hash_key_type = hash_key_type\n self.range_key = range_key\n self.range_key_type = range_key_type\n\n self.attrs = {}\n for key, value in attrs.iteritems():\n self.attrs[key] = DynamoType(value)\n\n def __repr__(self):\n return \"Item: {0}\".format(self.to_json())\n\n def to_json(self):\n attributes = {}\n for attribute_key, attribute in 
self.attrs.iteritems():\n attributes[attribute_key] = attribute.value\n\n return {\n \"Attributes\": attributes\n }\n\n def describe_attrs(self, attributes):\n if attributes:\n included = {}\n for key, value in self.attrs.iteritems():\n if key in attributes:\n included[key] = value\n else:\n included = self.attrs\n return {\n \"Item\": included\n }\n\n\nclass Table(object):\n\n def __init__(self, name, hash_key_attr, hash_key_type,\n range_key_attr=None, range_key_type=None, read_capacity=None,\n write_capacity=None):\n self.name = name\n self.hash_key_attr = hash_key_attr\n self.hash_key_type = hash_key_type\n self.range_key_attr = range_key_attr\n self.range_key_type = range_key_type\n self.read_capacity = read_capacity\n self.write_capacity = write_capacity\n self.created_at = datetime.datetime.now()\n self.items = defaultdict(dict)\n\n @property\n def has_range_key(self):\n return self.range_key_attr is not None\n\n @property\n def describe(self):\n results = {\n \"Table\": {\n \"CreationDateTime\": unix_time(self.created_at),\n \"KeySchema\": {\n \"HashKeyElement\": {\n \"AttributeName\": self.hash_key_attr,\n \"AttributeType\": self.hash_key_type\n },\n },\n \"ProvisionedThroughput\": {\n \"ReadCapacityUnits\": self.read_capacity,\n \"WriteCapacityUnits\": self.write_capacity\n },\n \"TableName\": self.name,\n \"TableStatus\": \"ACTIVE\",\n \"ItemCount\": len(self),\n \"TableSizeBytes\": 0,\n }\n }\n if self.has_range_key:\n results[\"Table\"][\"KeySchema\"][\"RangeKeyElement\"] = {\n \"AttributeName\": self.range_key_attr,\n \"AttributeType\": self.range_key_type\n }\n return results\n\n def __len__(self):\n count = 0\n for key, value in self.items.iteritems():\n if self.has_range_key:\n count += len(value)\n else:\n count += 1\n return count\n\n def __nonzero__(self):\n return True\n\n def put_item(self, item_attrs):\n hash_value = DynamoType(item_attrs.get(self.hash_key_attr))\n if self.has_range_key:\n range_value = DynamoType(item_attrs.get(self.range_key_attr))\n else:\n range_value = None\n\n item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs)\n\n if range_value:\n self.items[hash_value][range_value] = item\n else:\n self.items[hash_value] = item\n return item\n\n def get_item(self, hash_key, range_key):\n if self.has_range_key and not range_key:\n raise ValueError(\"Table has a range key, but no range key was passed into get_item\")\n try:\n if range_key:\n return self.items[hash_key][range_key]\n else:\n return self.items[hash_key]\n except KeyError:\n return None\n\n def query(self, hash_key, range_comparison, range_objs):\n results = []\n last_page = True # Once pagination is implemented, change this\n\n possible_results = list(self.all_items())\n if range_comparison:\n for result in possible_results:\n if result.range_key.compare(range_comparison, range_objs):\n results.append(result)\n else:\n # If we're not filtering on range key, return all values\n results = possible_results\n return results, last_page\n\n def all_items(self):\n for hash_set in self.items.values():\n if self.range_key_attr:\n for item in hash_set.values():\n yield item\n else:\n yield hash_set\n\n def scan(self, filters):\n results = []\n scanned_count = 0\n last_page = True # Once pagination is implemented, change this\n\n for result in self.all_items():\n scanned_count += 1\n passes_all_conditions = True\n for attribute_name, (comparison_operator, comparison_objs) in filters.iteritems():\n attribute = result.attrs.get(attribute_name)\n\n if attribute:\n # 
Attribute found\n if not attribute.compare(comparison_operator, comparison_objs):\n passes_all_conditions = False\n break\n elif comparison_operator == 'NULL':\n # Comparison is NULL and we don't have the attribute\n continue\n else:\n # No attribute found and comparison is no NULL. This item fails\n passes_all_conditions = False\n break\n\n if passes_all_conditions:\n results.append(result)\n\n return results, scanned_count, last_page\n\n def delete_item(self, hash_key, range_key):\n try:\n if range_key:\n return self.items[hash_key].pop(range_key)\n else:\n return self.items.pop(hash_key)\n except KeyError:\n return None\n\n\nclass DynamoDBBackend(BaseBackend):\n\n def __init__(self):\n self.tables = OrderedDict()\n\n def create_table(self, name, **params):\n table = Table(name, **params)\n self.tables[name] = table\n return table\n\n def delete_table(self, name):\n return self.tables.pop(name, None)\n\n def update_table_throughput(self, name, new_read_units, new_write_units):\n table = self.tables[name]\n table.read_capacity = new_read_units\n table.write_capacity = new_write_units\n return table\n\n def put_item(self, table_name, item_attrs):\n table = self.tables.get(table_name)\n if not table:\n return None\n\n return table.put_item(item_attrs)\n\n def get_item(self, table_name, hash_key_dict, range_key_dict):\n table = self.tables.get(table_name)\n if not table:\n return None\n\n hash_key = DynamoType(hash_key_dict)\n range_key = DynamoType(range_key_dict) if range_key_dict else None\n\n return table.get_item(hash_key, range_key)\n\n def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts):\n table = self.tables.get(table_name)\n if not table:\n return None, None\n\n hash_key = DynamoType(hash_key_dict)\n range_values = [DynamoType(range_value) for range_value in range_value_dicts]\n\n return table.query(hash_key, range_comparison, range_values)\n\n def scan(self, table_name, filters):\n table = self.tables.get(table_name)\n if not table:\n return None, None, None\n\n scan_filters = {}\n for key, (comparison_operator, comparison_values) in filters.iteritems():\n dynamo_types = [DynamoType(value) for value in comparison_values]\n scan_filters[key] = (comparison_operator, dynamo_types)\n\n return table.scan(scan_filters)\n\n def delete_item(self, table_name, hash_key_dict, range_key_dict):\n table = self.tables.get(table_name)\n if not table:\n return None\n\n hash_key = DynamoType(hash_key_dict)\n range_key = DynamoType(range_key_dict) if range_key_dict else None\n\n return table.delete_item(hash_key, range_key)\n\n\ndynamodb_backend = DynamoDBBackend()\n", "path": "moto/dynamodb/models.py"}]}
3,702
139
gh_patches_debug_7158
rasdani/github-patches
git_diff
liberapay__liberapay.com-1140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GitLab support is broken `{"error":"API V3 is no longer supported. Use API V4 instead."}` </issue> <code> [start of liberapay/elsewhere/gitlab.py] 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 from liberapay.elsewhere._base import PlatformOAuth2 4 from liberapay.elsewhere._extractors import key 5 from liberapay.elsewhere._paginators import header_links_paginator 6 7 8 class GitLab(PlatformOAuth2): 9 10 # Platform attributes 11 name = 'gitlab' 12 display_name = 'GitLab' 13 account_url = 'https://gitlab.com/u/{user_name}' 14 repo_url = 'https://gitlab.com/{slug}' 15 has_teams = True 16 17 # Auth attributes 18 # GitLab uses https://github.com/doorkeeper-gem/doorkeeper 19 auth_url = 'https://gitlab.com/oauth/authorize' 20 access_token_url = 'https://gitlab.com/oauth/token' 21 22 # can_auth_with_client_credentials = True 23 # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795 24 25 # API attributes 26 # http://doc.gitlab.com/ce/api/ 27 api_format = 'json' 28 api_paginator = header_links_paginator(total_header='X-Total') 29 api_url = 'https://gitlab.com/api/v3' 30 api_user_info_path = '/users/{user_id}' 31 api_user_name_info_path = '/users?username={user_name}' 32 api_user_self_info_path = '/user' 33 api_team_members_path = '/groups/{user_name}/members' 34 api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100' 35 api_starred_path = '/projects?starred=true&visibility=public' 36 37 # User info extractors 38 x_user_id = key('id') 39 x_user_name = key('username') 40 x_display_name = key('name') 41 x_email = key('email') 42 x_avatar_url = key('avatar_url') 43 x_description = key('bio') 44 45 # Repo info extractors 46 x_repo_id = key('id') 47 x_repo_name = key('name') 48 x_repo_slug = key('path_with_namespace') 49 x_repo_description = key('description') 50 x_repo_last_update = key('last_activity_at') 51 x_repo_is_fork = key('forked_from_project', clean=bool) 52 x_repo_stars_count = key('star_count') 53 x_repo_owner_id = key('owner', clean=lambda d: d['id']) 54 [end of liberapay/elsewhere/gitlab.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py --- a/liberapay/elsewhere/gitlab.py +++ b/liberapay/elsewhere/gitlab.py @@ -26,7 +26,7 @@ # http://doc.gitlab.com/ce/api/ api_format = 'json' api_paginator = header_links_paginator(total_header='X-Total') - api_url = 'https://gitlab.com/api/v3' + api_url = 'https://gitlab.com/api/v4' api_user_info_path = '/users/{user_id}' api_user_name_info_path = '/users?username={user_name}' api_user_self_info_path = '/user'
{"golden_diff": "diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py\n--- a/liberapay/elsewhere/gitlab.py\n+++ b/liberapay/elsewhere/gitlab.py\n@@ -26,7 +26,7 @@\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n- api_url = 'https://gitlab.com/api/v3'\n+ api_url = 'https://gitlab.com/api/v4'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n", "issue": "GitLab support is broken\n`{\"error\":\"API V3 is no longer supported. Use API V4 instead.\"}`\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitLab(PlatformOAuth2):\n\n # Platform attributes\n name = 'gitlab'\n display_name = 'GitLab'\n account_url = 'https://gitlab.com/u/{user_name}'\n repo_url = 'https://gitlab.com/{slug}'\n has_teams = True\n\n # Auth attributes\n # GitLab uses https://github.com/doorkeeper-gem/doorkeeper\n auth_url = 'https://gitlab.com/oauth/authorize'\n access_token_url = 'https://gitlab.com/oauth/token'\n\n # can_auth_with_client_credentials = True\n # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795\n\n # API attributes\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n api_url = 'https://gitlab.com/api/v3'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/groups/{user_name}/members'\n api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'\n api_starred_path = '/projects?starred=true&visibility=public'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n x_avatar_url = key('avatar_url')\n x_description = key('bio')\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('path_with_namespace')\n x_repo_description = key('description')\n x_repo_last_update = key('last_activity_at')\n x_repo_is_fork = key('forked_from_project', clean=bool)\n x_repo_stars_count = key('star_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n", "path": "liberapay/elsewhere/gitlab.py"}]}
1,197
166
gh_patches_debug_5556
rasdani/github-patches
git_diff
projectmesa__mesa-1697
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix codespell build for tutorial docs Not sure if this is possible, but codespell will pick up image text and try to suggest text edits and says the build is broken. Hoping there is a setting somewhere that we can add to ignore the image text. Here is an example: https://github.com/projectmesa/mesa/pull/1656/files#diff-732a7eb31fbbd25075ee965837dc0092fb53c2cb64c068e60392d9bf69d9f9bbR877 <img width="1052" alt="Screenshot 2023-04-25 at 12 17 29 PM" src="https://user-images.githubusercontent.com/166734/234368855-d62da5a6-aff8-46f1-bae5-bcf8aa05607f.png"> <img width="708" alt="Screenshot 2023-04-25 at 12 26 06 PM" src="https://user-images.githubusercontent.com/166734/234369720-91c22dbb-f7b9-4b9c-8764-832eb6e63cad.png"> </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import os 3 import re 4 import shutil 5 import urllib.request 6 import zipfile 7 from codecs import open 8 9 from setuptools import find_packages, setup 10 11 requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"] 12 13 extras_require = { 14 "dev": [ 15 "black", 16 "ruff==0.0.254", 17 "coverage", 18 "pytest >= 4.6", 19 "pytest-cov", 20 "sphinx", 21 ], 22 # Constrain sphinx version until https://github.com/readthedocs/readthedocs.org/issues/10279 23 # is fixed. 24 "docs": ["sphinx<7", "ipython", "nbsphinx"], 25 } 26 27 version = "" 28 with open("mesa/__init__.py") as fd: 29 version = re.search( 30 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE 31 ).group(1) 32 33 with open("README.rst", "rb", encoding="utf-8") as f: 34 readme = f.read() 35 36 # Ensure JS dependencies are downloaded 37 external_dir = "mesa/visualization/templates/external" 38 # We use a different path for single-file JS because some of them are loaded 39 # the same way as Mesa JS files 40 external_dir_single = "mesa/visualization/templates/js/external" 41 # First, ensure that the external directories exists 42 os.makedirs(external_dir, exist_ok=True) 43 os.makedirs(external_dir_single, exist_ok=True) 44 45 46 def ensure_js_dep(dirname, url): 47 dst_path = os.path.join(external_dir, dirname) 48 if os.path.isdir(dst_path): 49 # Do nothing if already downloaded 50 return 51 print(f"Downloading the {dirname} dependency from the internet...") 52 zip_file = dirname + ".zip" 53 urllib.request.urlretrieve(url, zip_file) 54 with zipfile.ZipFile(zip_file, "r") as zip_ref: 55 zip_ref.extractall() 56 shutil.move(dirname, dst_path) 57 # Cleanup 58 os.remove(zip_file) 59 print("Done") 60 61 62 def ensure_js_dep_single(url, out_name=None): 63 # Used for downloading e.g. 
D3.js single file 64 if out_name is None: 65 out_name = url.split("/")[-1] 66 dst_path = os.path.join(external_dir_single, out_name) 67 if os.path.isfile(dst_path): 68 return 69 print(f"Downloading the {out_name} dependency from the internet...") 70 urllib.request.urlretrieve(url, out_name) 71 shutil.move(out_name, dst_path) 72 73 74 # Important: when you update JS dependency version, make sure to also update the 75 # hardcoded included files and versions in: mesa/visualization/templates/modular_template.html 76 77 # Ensure Bootstrap 78 bootstrap_version = "5.1.3" 79 ensure_js_dep( 80 f"bootstrap-{bootstrap_version}-dist", 81 f"https://github.com/twbs/bootstrap/releases/download/v{bootstrap_version}/bootstrap-{bootstrap_version}-dist.zip", 82 ) 83 84 # Ensure Bootstrap Slider 85 bootstrap_slider_version = "11.0.2" 86 ensure_js_dep( 87 f"bootstrap-slider-{bootstrap_slider_version}", 88 f"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip", 89 ) 90 91 # Important: when updating the D3 version, make sure to update the constant 92 # D3_JS_FILE in mesa/visualization/ModularVisualization.py. 93 d3_version = "7.4.3" 94 ensure_js_dep_single( 95 f"https://cdnjs.cloudflare.com/ajax/libs/d3/{d3_version}/d3.min.js", 96 out_name=f"d3-{d3_version}.min.js", 97 ) 98 # Important: Make sure to update CHART_JS_FILE in 99 # mesa/visualization/ModularVisualization.py. 100 chartjs_version = "3.6.1" 101 ensure_js_dep_single( 102 f"https://cdn.jsdelivr.net/npm/chart.js@{chartjs_version}/dist/chart.min.js", 103 out_name=f"chart-{chartjs_version}.min.js", 104 ) 105 106 107 setup( 108 name="Mesa", 109 version=version, 110 description="Agent-based modeling (ABM) in Python 3+", 111 long_description=readme, 112 author="Project Mesa Team", 113 author_email="[email protected]", 114 url="https://github.com/projectmesa/mesa", 115 packages=find_packages(), 116 package_data={ 117 "mesa": [ 118 "visualization/templates/*.html", 119 "visualization/templates/css/*", 120 "visualization/templates/js/*", 121 "visualization/templates/external/**/*", 122 ], 123 "cookiecutter-mesa": ["cookiecutter-mesa/*"], 124 }, 125 include_package_data=True, 126 install_requires=requires, 127 extras_require=extras_require, 128 keywords="agent based modeling model ABM simulation multi-agent", 129 license="Apache 2.0", 130 zip_safe=False, 131 classifiers=[ 132 "Topic :: Scientific/Engineering", 133 "Topic :: Scientific/Engineering :: Artificial Life", 134 "Topic :: Scientific/Engineering :: Artificial Intelligence", 135 "Intended Audience :: Science/Research", 136 "Programming Language :: Python :: 3 :: Only", 137 "Programming Language :: Python :: 3.8", 138 "Programming Language :: Python :: 3.9", 139 "Programming Language :: Python :: 3.10", 140 "License :: OSI Approved :: Apache Software License", 141 "Operating System :: OS Independent", 142 "Development Status :: 3 - Alpha", 143 "Natural Language :: English", 144 ], 145 entry_points=""" 146 [console_scripts] 147 mesa=mesa.main:cli 148 """, 149 python_requires=">=3.8", 150 ) 151 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -21,7 +21,10 @@ ], # Constrain sphinx version until https://github.com/readthedocs/readthedocs.org/issues/10279 # is fixed. - "docs": ["sphinx<7", "ipython", "nbsphinx"], + # Explicitly install ipykernel for Python 3.8. + # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython + # Could be removed in the future + "docs": ["sphinx<7", "ipython", "nbsphinx", "ipykernel"], } version = ""
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,10 @@\n ],\n # Constrain sphinx version until https://github.com/readthedocs/readthedocs.org/issues/10279\n # is fixed.\n- \"docs\": [\"sphinx<7\", \"ipython\", \"nbsphinx\"],\n+ # Explicitly install ipykernel for Python 3.8.\n+ # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython\n+ # Could be removed in the future\n+ \"docs\": [\"sphinx<7\", \"ipython\", \"nbsphinx\", \"ipykernel\"],\n }\n \n version = \"\"\n", "issue": "Fix codespell build for tutorial docs\nNot sure if this is possible, but codespell will pick up image text and try to suggest text edits and says the build is broken. \r\nHoping there is a setting somewhere that we can add to ignore the image text. \r\n\r\nHere is an example: https://github.com/projectmesa/mesa/pull/1656/files#diff-732a7eb31fbbd25075ee965837dc0092fb53c2cb64c068e60392d9bf69d9f9bbR877\r\n\r\n<img width=\"1052\" alt=\"Screenshot 2023-04-25 at 12 17 29 PM\" src=\"https://user-images.githubusercontent.com/166734/234368855-d62da5a6-aff8-46f1-bae5-bcf8aa05607f.png\">\r\n\r\n<img width=\"708\" alt=\"Screenshot 2023-04-25 at 12 26 06 PM\" src=\"https://user-images.githubusercontent.com/166734/234369720-91c22dbb-f7b9-4b9c-8764-832eb6e63cad.png\">\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport re\nimport shutil\nimport urllib.request\nimport zipfile\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\n \"black\",\n \"ruff==0.0.254\",\n \"coverage\",\n \"pytest >= 4.6\",\n \"pytest-cov\",\n \"sphinx\",\n ],\n # Constrain sphinx version until https://github.com/readthedocs/readthedocs.org/issues/10279\n # is fixed.\n \"docs\": [\"sphinx<7\", \"ipython\", \"nbsphinx\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n# Ensure JS dependencies are downloaded\nexternal_dir = \"mesa/visualization/templates/external\"\n# We use a different path for single-file JS because some of them are loaded\n# the same way as Mesa JS files\nexternal_dir_single = \"mesa/visualization/templates/js/external\"\n# First, ensure that the external directories exists\nos.makedirs(external_dir, exist_ok=True)\nos.makedirs(external_dir_single, exist_ok=True)\n\n\ndef ensure_js_dep(dirname, url):\n dst_path = os.path.join(external_dir, dirname)\n if os.path.isdir(dst_path):\n # Do nothing if already downloaded\n return\n print(f\"Downloading the {dirname} dependency from the internet...\")\n zip_file = dirname + \".zip\"\n urllib.request.urlretrieve(url, zip_file)\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall()\n shutil.move(dirname, dst_path)\n # Cleanup\n os.remove(zip_file)\n print(\"Done\")\n\n\ndef ensure_js_dep_single(url, out_name=None):\n # Used for downloading e.g. 
D3.js single file\n if out_name is None:\n out_name = url.split(\"/\")[-1]\n dst_path = os.path.join(external_dir_single, out_name)\n if os.path.isfile(dst_path):\n return\n print(f\"Downloading the {out_name} dependency from the internet...\")\n urllib.request.urlretrieve(url, out_name)\n shutil.move(out_name, dst_path)\n\n\n# Important: when you update JS dependency version, make sure to also update the\n# hardcoded included files and versions in: mesa/visualization/templates/modular_template.html\n\n# Ensure Bootstrap\nbootstrap_version = \"5.1.3\"\nensure_js_dep(\n f\"bootstrap-{bootstrap_version}-dist\",\n f\"https://github.com/twbs/bootstrap/releases/download/v{bootstrap_version}/bootstrap-{bootstrap_version}-dist.zip\",\n)\n\n# Ensure Bootstrap Slider\nbootstrap_slider_version = \"11.0.2\"\nensure_js_dep(\n f\"bootstrap-slider-{bootstrap_slider_version}\",\n f\"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip\",\n)\n\n# Important: when updating the D3 version, make sure to update the constant\n# D3_JS_FILE in mesa/visualization/ModularVisualization.py.\nd3_version = \"7.4.3\"\nensure_js_dep_single(\n f\"https://cdnjs.cloudflare.com/ajax/libs/d3/{d3_version}/d3.min.js\",\n out_name=f\"d3-{d3_version}.min.js\",\n)\n# Important: Make sure to update CHART_JS_FILE in\n# mesa/visualization/ModularVisualization.py.\nchartjs_version = \"3.6.1\"\nensure_js_dep_single(\n f\"https://cdn.jsdelivr.net/npm/chart.js@{chartjs_version}/dist/chart.min.js\",\n out_name=f\"chart-{chartjs_version}.min.js\",\n)\n\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/js/*\",\n \"visualization/templates/external/**/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]}
2,418
175
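A side note on the codespell part of the issue above: one way to keep a spell-checker away from text baked into notebook outputs (rendered images, widget state) is to strip the outputs before linting. The sketch below is illustrative only; it is not what the merged patch does (that patch pins `ipykernel` for the docs build), and the `docs/tutorials` path is an assumption.

```python
# Illustrative sketch, not part of the Mesa patch: clear notebook outputs so
# codespell never sees base64 image blobs or rendered widget text.
from pathlib import Path

import nbformat


def strip_outputs(notebook_path: Path) -> None:
    """Remove execution outputs from a Jupyter notebook in place."""
    nb = nbformat.read(str(notebook_path), as_version=4)
    for cell in nb.cells:
        if cell.cell_type == "code":
            cell.outputs = []           # drop images, tracebacks, widget state
            cell.execution_count = None
    nbformat.write(nb, str(notebook_path))


if __name__ == "__main__":
    for path in Path("docs/tutorials").glob("*.ipynb"):   # assumed location
        strip_outputs(path)
```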
gh_patches_debug_11744
rasdani/github-patches
git_diff
sanic-org__sanic-186
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unparseable JSON should not be ignored request.py, line 64 Returning a None here causes breakage in application code further down the line. Generate a 400 error here on malformed JSON to protect the server. </issue> <code> [start of sanic/request.py] 1 from cgi import parse_header 2 from collections import namedtuple 3 from http.cookies import SimpleCookie 4 from httptools import parse_url 5 from urllib.parse import parse_qs 6 from ujson import loads as json_loads 7 8 from .log import log 9 10 11 DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream" 12 # HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1 13 # > If the media type remains unknown, the recipient SHOULD treat it 14 # > as type "application/octet-stream" 15 16 17 class RequestParameters(dict): 18 """ 19 Hosts a dict with lists as values where get returns the first 20 value of the list and getlist returns the whole shebang 21 """ 22 23 def __init__(self, *args, **kwargs): 24 self.super = super() 25 self.super.__init__(*args, **kwargs) 26 27 def get(self, name, default=None): 28 values = self.super.get(name) 29 return values[0] if values else default 30 31 def getlist(self, name, default=None): 32 return self.super.get(name, default) 33 34 35 class Request(dict): 36 """ 37 Properties of an HTTP request such as URL, headers, etc. 38 """ 39 __slots__ = ( 40 'url', 'headers', 'version', 'method', '_cookies', 41 'query_string', 'body', 42 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files', 43 ) 44 45 def __init__(self, url_bytes, headers, version, method): 46 # TODO: Content-Encoding detection 47 url_parsed = parse_url(url_bytes) 48 self.url = url_parsed.path.decode('utf-8') 49 self.headers = headers 50 self.version = version 51 self.method = method 52 self.query_string = None 53 if url_parsed.query: 54 self.query_string = url_parsed.query.decode('utf-8') 55 56 # Init but do not inhale 57 self.body = None 58 self.parsed_json = None 59 self.parsed_form = None 60 self.parsed_files = None 61 self.parsed_args = None 62 self._cookies = None 63 64 @property 65 def json(self): 66 if not self.parsed_json: 67 try: 68 self.parsed_json = json_loads(self.body) 69 except Exception: 70 log.exception("Failed when parsing body as json") 71 72 return self.parsed_json 73 74 @property 75 def form(self): 76 if self.parsed_form is None: 77 self.parsed_form = RequestParameters() 78 self.parsed_files = RequestParameters() 79 content_type = self.headers.get( 80 'Content-Type', DEFAULT_HTTP_CONTENT_TYPE) 81 content_type, parameters = parse_header(content_type) 82 try: 83 if content_type == 'application/x-www-form-urlencoded': 84 self.parsed_form = RequestParameters( 85 parse_qs(self.body.decode('utf-8'))) 86 elif content_type == 'multipart/form-data': 87 # TODO: Stream this instead of reading to/from memory 88 boundary = parameters['boundary'].encode('utf-8') 89 self.parsed_form, self.parsed_files = ( 90 parse_multipart_form(self.body, boundary)) 91 except Exception: 92 log.exception("Failed when parsing form") 93 94 return self.parsed_form 95 96 @property 97 def files(self): 98 if self.parsed_files is None: 99 self.form # compute form to get files 100 101 return self.parsed_files 102 103 @property 104 def args(self): 105 if self.parsed_args is None: 106 if self.query_string: 107 self.parsed_args = RequestParameters( 108 parse_qs(self.query_string)) 109 else: 110 self.parsed_args = {} 111 112 return self.parsed_args 
113 114 @property 115 def cookies(self): 116 if self._cookies is None: 117 cookie = self.headers.get('Cookie') or self.headers.get('cookie') 118 if cookie is not None: 119 cookies = SimpleCookie() 120 cookies.load(cookie) 121 self._cookies = {name: cookie.value 122 for name, cookie in cookies.items()} 123 else: 124 self._cookies = {} 125 return self._cookies 126 127 128 File = namedtuple('File', ['type', 'body', 'name']) 129 130 131 def parse_multipart_form(body, boundary): 132 """ 133 Parses a request body and returns fields and files 134 :param body: Bytes request body 135 :param boundary: Bytes multipart boundary 136 :return: fields (RequestParameters), files (RequestParameters) 137 """ 138 files = RequestParameters() 139 fields = RequestParameters() 140 141 form_parts = body.split(boundary) 142 for form_part in form_parts[1:-1]: 143 file_name = None 144 file_type = None 145 field_name = None 146 line_index = 2 147 line_end_index = 0 148 while not line_end_index == -1: 149 line_end_index = form_part.find(b'\r\n', line_index) 150 form_line = form_part[line_index:line_end_index].decode('utf-8') 151 line_index = line_end_index + 2 152 153 if not form_line: 154 break 155 156 colon_index = form_line.index(':') 157 form_header_field = form_line[0:colon_index] 158 form_header_value, form_parameters = parse_header( 159 form_line[colon_index + 2:]) 160 161 if form_header_field == 'Content-Disposition': 162 if 'filename' in form_parameters: 163 file_name = form_parameters['filename'] 164 field_name = form_parameters.get('name') 165 elif form_header_field == 'Content-Type': 166 file_type = form_header_value 167 168 post_data = form_part[line_index:-4] 169 if file_name or file_type: 170 file = File(type=file_type, name=file_name, body=post_data) 171 if field_name in files: 172 files[field_name].append(file) 173 else: 174 files[field_name] = [file] 175 else: 176 value = post_data.decode('utf-8') 177 if field_name in fields: 178 fields[field_name].append(value) 179 else: 180 fields[field_name] = [value] 181 182 return fields, files 183 [end of sanic/request.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/request.py b/sanic/request.py --- a/sanic/request.py +++ b/sanic/request.py @@ -4,6 +4,7 @@ from httptools import parse_url from urllib.parse import parse_qs from ujson import loads as json_loads +from sanic.exceptions import InvalidUsage from .log import log @@ -67,7 +68,7 @@ try: self.parsed_json = json_loads(self.body) except Exception: - log.exception("Failed when parsing body as json") + raise InvalidUsage("Failed when parsing body as json") return self.parsed_json
{"golden_diff": "diff --git a/sanic/request.py b/sanic/request.py\n--- a/sanic/request.py\n+++ b/sanic/request.py\n@@ -4,6 +4,7 @@\n from httptools import parse_url\n from urllib.parse import parse_qs\n from ujson import loads as json_loads\n+from sanic.exceptions import InvalidUsage\n \n from .log import log\n \n@@ -67,7 +68,7 @@\n try:\n self.parsed_json = json_loads(self.body)\n except Exception:\n- log.exception(\"Failed when parsing body as json\")\n+ raise InvalidUsage(\"Failed when parsing body as json\")\n \n return self.parsed_json\n", "issue": "Unparseable JSON should not be ignored\nrequest.py, line 64\n\nReturning a None here causes breakage in application code further down the line. Generate a 400 error here on malformed JSON to protect the server.\n\n", "before_files": [{"content": "from cgi import parse_header\nfrom collections import namedtuple\nfrom http.cookies import SimpleCookie\nfrom httptools import parse_url\nfrom urllib.parse import parse_qs\nfrom ujson import loads as json_loads\n\nfrom .log import log\n\n\nDEFAULT_HTTP_CONTENT_TYPE = \"application/octet-stream\"\n# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1\n# > If the media type remains unknown, the recipient SHOULD treat it\n# > as type \"application/octet-stream\"\n\n\nclass RequestParameters(dict):\n \"\"\"\n Hosts a dict with lists as values where get returns the first\n value of the list and getlist returns the whole shebang\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.super = super()\n self.super.__init__(*args, **kwargs)\n\n def get(self, name, default=None):\n values = self.super.get(name)\n return values[0] if values else default\n\n def getlist(self, name, default=None):\n return self.super.get(name, default)\n\n\nclass Request(dict):\n \"\"\"\n Properties of an HTTP request such as URL, headers, etc.\n \"\"\"\n __slots__ = (\n 'url', 'headers', 'version', 'method', '_cookies',\n 'query_string', 'body',\n 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files',\n )\n\n def __init__(self, url_bytes, headers, version, method):\n # TODO: Content-Encoding detection\n url_parsed = parse_url(url_bytes)\n self.url = url_parsed.path.decode('utf-8')\n self.headers = headers\n self.version = version\n self.method = method\n self.query_string = None\n if url_parsed.query:\n self.query_string = url_parsed.query.decode('utf-8')\n\n # Init but do not inhale\n self.body = None\n self.parsed_json = None\n self.parsed_form = None\n self.parsed_files = None\n self.parsed_args = None\n self._cookies = None\n\n @property\n def json(self):\n if not self.parsed_json:\n try:\n self.parsed_json = json_loads(self.body)\n except Exception:\n log.exception(\"Failed when parsing body as json\")\n\n return self.parsed_json\n\n @property\n def form(self):\n if self.parsed_form is None:\n self.parsed_form = RequestParameters()\n self.parsed_files = RequestParameters()\n content_type = self.headers.get(\n 'Content-Type', DEFAULT_HTTP_CONTENT_TYPE)\n content_type, parameters = parse_header(content_type)\n try:\n if content_type == 'application/x-www-form-urlencoded':\n self.parsed_form = RequestParameters(\n parse_qs(self.body.decode('utf-8')))\n elif content_type == 'multipart/form-data':\n # TODO: Stream this instead of reading to/from memory\n boundary = parameters['boundary'].encode('utf-8')\n self.parsed_form, self.parsed_files = (\n parse_multipart_form(self.body, boundary))\n except Exception:\n log.exception(\"Failed when parsing form\")\n\n return self.parsed_form\n\n @property\n 
def files(self):\n if self.parsed_files is None:\n self.form # compute form to get files\n\n return self.parsed_files\n\n @property\n def args(self):\n if self.parsed_args is None:\n if self.query_string:\n self.parsed_args = RequestParameters(\n parse_qs(self.query_string))\n else:\n self.parsed_args = {}\n\n return self.parsed_args\n\n @property\n def cookies(self):\n if self._cookies is None:\n cookie = self.headers.get('Cookie') or self.headers.get('cookie')\n if cookie is not None:\n cookies = SimpleCookie()\n cookies.load(cookie)\n self._cookies = {name: cookie.value\n for name, cookie in cookies.items()}\n else:\n self._cookies = {}\n return self._cookies\n\n\nFile = namedtuple('File', ['type', 'body', 'name'])\n\n\ndef parse_multipart_form(body, boundary):\n \"\"\"\n Parses a request body and returns fields and files\n :param body: Bytes request body\n :param boundary: Bytes multipart boundary\n :return: fields (RequestParameters), files (RequestParameters)\n \"\"\"\n files = RequestParameters()\n fields = RequestParameters()\n\n form_parts = body.split(boundary)\n for form_part in form_parts[1:-1]:\n file_name = None\n file_type = None\n field_name = None\n line_index = 2\n line_end_index = 0\n while not line_end_index == -1:\n line_end_index = form_part.find(b'\\r\\n', line_index)\n form_line = form_part[line_index:line_end_index].decode('utf-8')\n line_index = line_end_index + 2\n\n if not form_line:\n break\n\n colon_index = form_line.index(':')\n form_header_field = form_line[0:colon_index]\n form_header_value, form_parameters = parse_header(\n form_line[colon_index + 2:])\n\n if form_header_field == 'Content-Disposition':\n if 'filename' in form_parameters:\n file_name = form_parameters['filename']\n field_name = form_parameters.get('name')\n elif form_header_field == 'Content-Type':\n file_type = form_header_value\n\n post_data = form_part[line_index:-4]\n if file_name or file_type:\n file = File(type=file_type, name=file_name, body=post_data)\n if field_name in files:\n files[field_name].append(file)\n else:\n files[field_name] = [file]\n else:\n value = post_data.decode('utf-8')\n if field_name in fields:\n fields[field_name].append(value)\n else:\n fields[field_name] = [value]\n\n return fields, files\n", "path": "sanic/request.py"}]}
2,328
142
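The patch above swaps the silent `log.exception(...)` for `raise InvalidUsage(...)`, which the framework maps to an HTTP 400. Below is a minimal, framework-free sketch of the same pattern; `BadRequest` is a hypothetical stand-in for `sanic.exceptions.InvalidUsage`.

```python
# Standalone illustration of the fix: fail loudly with a 400-style error on
# malformed JSON instead of returning None to application code.
import json


class BadRequest(Exception):
    """Stand-in for sanic.exceptions.InvalidUsage (HTTP 400)."""
    status_code = 400


class Request:
    def __init__(self, body: bytes):
        self.body = body
        self._parsed_json = None

    @property
    def json(self):
        if self._parsed_json is None:
            try:
                self._parsed_json = json.loads(self.body)
            except ValueError as exc:
                # Surface the problem instead of hiding it behind a None.
                raise BadRequest("Failed when parsing body as json") from exc
        return self._parsed_json


print(Request(b'{"ok": true}').json)    # {'ok': True}
try:
    Request(b"not json").json
except BadRequest as err:
    print("400 Bad Request:", err)
```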
gh_patches_debug_20940
rasdani/github-patches
git_diff
inventree__InvenTree-2984
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add unittests for auth stack Add full coverage for https://github.com/inventree/InvenTree/pull/2976 And the full auth stack in the middleware Add unittests for auth stack Add full coverage for https://github.com/inventree/InvenTree/pull/2976 And the full auth stack in the middleware </issue> <code> [start of InvenTree/InvenTree/middleware.py] 1 # -*- coding: utf-8 -*- 2 3 from django.conf import settings 4 from django.contrib.auth.middleware import PersistentRemoteUserMiddleware 5 from django.http import HttpResponse 6 from django.shortcuts import HttpResponseRedirect 7 from django.shortcuts import redirect 8 from django.urls import reverse_lazy, Resolver404 9 from django.urls import include, re_path 10 11 import logging 12 13 from rest_framework.authtoken.models import Token 14 from allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware 15 16 from InvenTree.urls import frontendpatterns 17 from common.models import InvenTreeSetting 18 19 20 logger = logging.getLogger("inventree") 21 22 23 class AuthRequiredMiddleware(object): 24 def __init__(self, get_response): 25 self.get_response = get_response 26 27 def __call__(self, request): 28 # Code to be executed for each request before 29 # the view (and later middleware) are called. 30 31 assert hasattr(request, 'user') 32 33 # API requests are handled by the DRF library 34 if request.path_info.startswith('/api/'): 35 return self.get_response(request) 36 37 if not request.user.is_authenticated: 38 """ 39 Normally, a web-based session would use csrftoken based authentication. 40 However when running an external application (e.g. the InvenTree app or Python library), 41 we must validate the user token manually. 42 """ 43 44 authorized = False 45 46 # Allow static files to be accessed without auth 47 # Important for e.g. login page 48 if request.path_info.startswith('/static/'): 49 authorized = True 50 51 # Unauthorized users can access the login page 52 elif request.path_info.startswith('/accounts/'): 53 authorized = True 54 55 elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys(): 56 auth = request.headers.get('Authorization', request.headers.get('authorization')).strip() 57 58 if auth.lower().startswith('token') and len(auth.split()) == 2: 59 token_key = auth.split()[1] 60 61 # Does the provided token match a valid user? 
62 try: 63 token = Token.objects.get(key=token_key) 64 65 # Provide the user information to the request 66 request.user = token.user 67 authorized = True 68 69 except Token.DoesNotExist: 70 logger.warning(f"Access denied for unknown token {token_key}") 71 72 # No authorization was found for the request 73 if not authorized: 74 # A logout request will redirect the user to the login screen 75 if request.path_info == reverse_lazy('account_logout'): 76 return HttpResponseRedirect(reverse_lazy('account_login')) 77 78 path = request.path_info 79 80 # List of URL endpoints we *do not* want to redirect to 81 urls = [ 82 reverse_lazy('account_login'), 83 reverse_lazy('account_logout'), 84 reverse_lazy('admin:login'), 85 reverse_lazy('admin:logout'), 86 ] 87 88 # Do not redirect requests to any of these paths 89 paths_ignore = [ 90 '/api/', 91 '/js/', 92 '/media/', 93 '/static/', 94 ] 95 96 if path not in urls and not any([path.startswith(p) for p in paths_ignore]): 97 # Save the 'next' parameter to pass through to the login view 98 99 return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path)) 100 101 else: 102 # Return a 401 (Unauthorized) response code for this request 103 return HttpResponse('Unauthorized', status=401) 104 105 response = self.get_response(request) 106 107 return response 108 109 110 url_matcher = re_path('', include(frontendpatterns)) 111 112 113 class Check2FAMiddleware(BaseRequire2FAMiddleware): 114 """check if user is required to have MFA enabled""" 115 def require_2fa(self, request): 116 # Superusers are require to have 2FA. 117 try: 118 if url_matcher.resolve(request.path[1:]): 119 return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA') 120 except Resolver404: 121 pass 122 return False 123 124 125 class CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware): 126 """This function ensures only frontend code triggers the MFA auth cycle""" 127 def process_request(self, request): 128 try: 129 if not url_matcher.resolve(request.path[1:]): 130 super().process_request(request) 131 except Resolver404: 132 pass 133 134 135 class InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware): 136 """ 137 Middleware to check if HTTP-header based auth is enabled and to set it up 138 """ 139 header = settings.REMOTE_LOGIN_HEADER 140 141 def process_request(self, request): 142 if not settings.REMOTE_LOGIN: 143 return 144 145 return super().process_request(request) 146 [end of InvenTree/InvenTree/middleware.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py --- a/InvenTree/InvenTree/middleware.py +++ b/InvenTree/InvenTree/middleware.py @@ -3,7 +3,6 @@ from django.conf import settings from django.contrib.auth.middleware import PersistentRemoteUserMiddleware from django.http import HttpResponse -from django.shortcuts import HttpResponseRedirect from django.shortcuts import redirect from django.urls import reverse_lazy, Resolver404 from django.urls import include, re_path @@ -71,10 +70,6 @@ # No authorization was found for the request if not authorized: - # A logout request will redirect the user to the login screen - if request.path_info == reverse_lazy('account_logout'): - return HttpResponseRedirect(reverse_lazy('account_login')) - path = request.path_info # List of URL endpoints we *do not* want to redirect to
{"golden_diff": "diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py\n--- a/InvenTree/InvenTree/middleware.py\n+++ b/InvenTree/InvenTree/middleware.py\n@@ -3,7 +3,6 @@\n from django.conf import settings\n from django.contrib.auth.middleware import PersistentRemoteUserMiddleware\n from django.http import HttpResponse\n-from django.shortcuts import HttpResponseRedirect\n from django.shortcuts import redirect\n from django.urls import reverse_lazy, Resolver404\n from django.urls import include, re_path\n@@ -71,10 +70,6 @@\n \n # No authorization was found for the request\n if not authorized:\n- # A logout request will redirect the user to the login screen\n- if request.path_info == reverse_lazy('account_logout'):\n- return HttpResponseRedirect(reverse_lazy('account_login'))\n-\n path = request.path_info\n \n # List of URL endpoints we *do not* want to redirect to\n", "issue": "Add unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\nAdd unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth.middleware import PersistentRemoteUserMiddleware\nfrom django.http import HttpResponse\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy, Resolver404\nfrom django.urls import include, re_path\n\nimport logging\n\nfrom rest_framework.authtoken.models import Token\nfrom allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware\n\nfrom InvenTree.urls import frontendpatterns\nfrom common.models import InvenTreeSetting\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass AuthRequiredMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n assert hasattr(request, 'user')\n\n # API requests are handled by the DRF library\n if request.path_info.startswith('/api/'):\n return self.get_response(request)\n\n if not request.user.is_authenticated:\n \"\"\"\n Normally, a web-based session would use csrftoken based authentication.\n However when running an external application (e.g. the InvenTree app or Python library),\n we must validate the user token manually.\n \"\"\"\n\n authorized = False\n\n # Allow static files to be accessed without auth\n # Important for e.g. 
login page\n if request.path_info.startswith('/static/'):\n authorized = True\n\n # Unauthorized users can access the login page\n elif request.path_info.startswith('/accounts/'):\n authorized = True\n\n elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():\n auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()\n\n if auth.lower().startswith('token') and len(auth.split()) == 2:\n token_key = auth.split()[1]\n\n # Does the provided token match a valid user?\n try:\n token = Token.objects.get(key=token_key)\n\n # Provide the user information to the request\n request.user = token.user\n authorized = True\n\n except Token.DoesNotExist:\n logger.warning(f\"Access denied for unknown token {token_key}\")\n\n # No authorization was found for the request\n if not authorized:\n # A logout request will redirect the user to the login screen\n if request.path_info == reverse_lazy('account_logout'):\n return HttpResponseRedirect(reverse_lazy('account_login'))\n\n path = request.path_info\n\n # List of URL endpoints we *do not* want to redirect to\n urls = [\n reverse_lazy('account_login'),\n reverse_lazy('account_logout'),\n reverse_lazy('admin:login'),\n reverse_lazy('admin:logout'),\n ]\n\n # Do not redirect requests to any of these paths\n paths_ignore = [\n '/api/',\n '/js/',\n '/media/',\n '/static/',\n ]\n\n if path not in urls and not any([path.startswith(p) for p in paths_ignore]):\n # Save the 'next' parameter to pass through to the login view\n\n return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))\n\n else:\n # Return a 401 (Unauthorized) response code for this request\n return HttpResponse('Unauthorized', status=401)\n\n response = self.get_response(request)\n\n return response\n\n\nurl_matcher = re_path('', include(frontendpatterns))\n\n\nclass Check2FAMiddleware(BaseRequire2FAMiddleware):\n \"\"\"check if user is required to have MFA enabled\"\"\"\n def require_2fa(self, request):\n # Superusers are require to have 2FA.\n try:\n if url_matcher.resolve(request.path[1:]):\n return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')\n except Resolver404:\n pass\n return False\n\n\nclass CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):\n \"\"\"This function ensures only frontend code triggers the MFA auth cycle\"\"\"\n def process_request(self, request):\n try:\n if not url_matcher.resolve(request.path[1:]):\n super().process_request(request)\n except Resolver404:\n pass\n\n\nclass InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):\n \"\"\"\n Middleware to check if HTTP-header based auth is enabled and to set it up\n \"\"\"\n header = settings.REMOTE_LOGIN_HEADER\n\n def process_request(self, request):\n if not settings.REMOTE_LOGIN:\n return\n\n return super().process_request(request)\n", "path": "InvenTree/InvenTree/middleware.py"}]}
1,947
213
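The issue asks for test coverage of the middleware shown above. Below is a sketch of what such tests could look like with Django's test client; the URL names (`index`, `account_login`) and the expectation that an authenticated request returns 200 are assumptions rather than details taken from the InvenTree test suite.

```python
# Sketch only: exercises the three branches of AuthRequiredMiddleware shown
# above (redirect for anonymous page requests, 401 for ignored paths, token
# authentication via the Authorization header).
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.authtoken.models import Token


class AuthRequiredMiddlewareTests(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username="tester", password="secret"
        )

    def test_anonymous_page_request_redirects_to_login(self):
        response = self.client.get(reverse("index"))          # name assumed
        self.assertEqual(response.status_code, 302)
        self.assertIn(reverse("account_login"), response.url)

    def test_anonymous_js_request_gets_401(self):
        # '/js/' is in paths_ignore, so no redirect; a plain 401 instead.
        response = self.client.get("/js/example.js")
        self.assertEqual(response.status_code, 401)

    def test_token_header_authenticates_request(self):
        token = Token.objects.create(user=self.user)
        response = self.client.get(
            reverse("index"),
            HTTP_AUTHORIZATION=f"Token {token.key}",
        )
        self.assertEqual(response.status_code, 200)            # assumed view
```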
gh_patches_debug_1058
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-4303
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Domain missing from Holland & Barrett website URLs In the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `"website": "/stores/aylesbury-3180/"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified url, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case. I don't know what the micordata etc standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete the URL of the page if a relative URL is harvested. </issue> <code> [start of locations/spiders/holland_and_barrett.py] 1 from scrapy.spiders import SitemapSpider 2 3 from locations.linked_data_parser import LinkedDataParser 4 5 6 class HollandAndBarrettSpider(SitemapSpider): 7 name = "holland_and_barrett" 8 item_attributes = { 9 "brand": "Holland & Barrett", 10 "brand_wikidata": "Q5880870", 11 } 12 sitemap_urls = [ 13 "https://www.hollandandbarrett.com/sitemap-stores.xml", 14 "https://www.hollandandbarrett.nl/sitemap-stores.xml", 15 "https://www.hollandandbarrett.be/sitemap-stores.xml", 16 "https://www.hollandandbarrett.ie/sitemap-stores.xml", 17 ] 18 sitemap_rules = [("/stores/", "parse"), ("/winkels/", "parse")] 19 download_delay = 1.0 20 21 def parse(self, response): 22 yield LinkedDataParser.parse(response, "LocalBusiness") 23 [end of locations/spiders/holland_and_barrett.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py --- a/locations/spiders/holland_and_barrett.py +++ b/locations/spiders/holland_and_barrett.py @@ -19,4 +19,6 @@ download_delay = 1.0 def parse(self, response): - yield LinkedDataParser.parse(response, "LocalBusiness") + item = LinkedDataParser.parse(response, "LocalBusiness") + item["website"] = response.urljoin(item["website"]) + yield item
{"golden_diff": "diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py\n--- a/locations/spiders/holland_and_barrett.py\n+++ b/locations/spiders/holland_and_barrett.py\n@@ -19,4 +19,6 @@\n download_delay = 1.0\n \n def parse(self, response):\n- yield LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item = LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item[\"website\"] = response.urljoin(item[\"website\"])\n+ yield item\n", "issue": "Domain missing from Holland & Barrett website URLs\nIn the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `\"website\": \"/stores/aylesbury-3180/\"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified url, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case.\r\n\r\nI don't know what the micordata etc standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete the URL of the page if a relative URL is harvested.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.linked_data_parser import LinkedDataParser\n\n\nclass HollandAndBarrettSpider(SitemapSpider):\n name = \"holland_and_barrett\"\n item_attributes = {\n \"brand\": \"Holland & Barrett\",\n \"brand_wikidata\": \"Q5880870\",\n }\n sitemap_urls = [\n \"https://www.hollandandbarrett.com/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.nl/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.be/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.ie/sitemap-stores.xml\",\n ]\n sitemap_rules = [(\"/stores/\", \"parse\"), (\"/winkels/\", \"parse\")]\n download_delay = 1.0\n\n def parse(self, response):\n yield LinkedDataParser.parse(response, \"LocalBusiness\")\n", "path": "locations/spiders/holland_and_barrett.py"}]}
923
126
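The one-line fix above leans on `response.urljoin`, which for Scrapy responses defers to `urllib.parse.urljoin` against the page URL (and honours a `<base href>` tag on HTML responses). A quick standalone check of that behaviour:

```python
# Sanity check of the URL joining the patch relies on. The Aylesbury URL
# comes from the issue text; the Dublin one is made up for illustration.
from urllib.parse import urljoin

page_url = "https://www.hollandandbarrett.com/stores/aylesbury-3180/"

# Relative path as found in the page's structured data:
print(urljoin(page_url, "/stores/aylesbury-3180/"))
# https://www.hollandandbarrett.com/stores/aylesbury-3180/

# Already-absolute URLs pass through unchanged:
print(urljoin(page_url, "https://www.hollandandbarrett.ie/stores/dublin-1/"))
# https://www.hollandandbarrett.ie/stores/dublin-1/
```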
gh_patches_debug_36801
rasdani/github-patches
git_diff
pypa__pip-7216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add SSL CA certificate information to `pip debug` **What's the problem this feature will solve?** As described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. This makes it hard to debug user issues. **Describe the solution you'd like** In the output of `pip debug` we should include: * the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much * `os.environ.get('REQUESTS_CA_BUNDLE')` * `os.environ.get('CURL_CA_BUNDLE')` * `pip._vendor.certifi.where()` This will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues. **Alternative Solutions** Do nothing. **Additional context** * #4459 * #4919 * #6335 * #6720 * #6915 </issue> <code> [start of src/pip/_internal/commands/debug.py] 1 # The following comment should be removed at some point in the future. 2 # mypy: disallow-untyped-defs=False 3 4 from __future__ import absolute_import 5 6 import locale 7 import logging 8 import sys 9 10 from pip._internal.cli import cmdoptions 11 from pip._internal.cli.base_command import Command 12 from pip._internal.cli.cmdoptions import make_target_python 13 from pip._internal.cli.status_codes import SUCCESS 14 from pip._internal.utils.logging import indent_log 15 from pip._internal.utils.misc import get_pip_version 16 from pip._internal.utils.typing import MYPY_CHECK_RUNNING 17 from pip._internal.wheel import format_tag 18 19 if MYPY_CHECK_RUNNING: 20 from typing import Any, List 21 from optparse import Values 22 23 logger = logging.getLogger(__name__) 24 25 26 def show_value(name, value): 27 # type: (str, str) -> None 28 logger.info('{}: {}'.format(name, value)) 29 30 31 def show_sys_implementation(): 32 # type: () -> None 33 logger.info('sys.implementation:') 34 if hasattr(sys, 'implementation'): 35 implementation = sys.implementation # type: ignore 36 implementation_name = implementation.name 37 else: 38 implementation_name = '' 39 40 with indent_log(): 41 show_value('name', implementation_name) 42 43 44 def show_tags(options): 45 # type: (Values) -> None 46 tag_limit = 10 47 48 target_python = make_target_python(options) 49 tags = target_python.get_tags() 50 51 # Display the target options that were explicitly provided. 52 formatted_target = target_python.format_given() 53 suffix = '' 54 if formatted_target: 55 suffix = ' (target: {})'.format(formatted_target) 56 57 msg = 'Compatible tags: {}{}'.format(len(tags), suffix) 58 logger.info(msg) 59 60 if options.verbose < 1 and len(tags) > tag_limit: 61 tags_limited = True 62 tags = tags[:tag_limit] 63 else: 64 tags_limited = False 65 66 with indent_log(): 67 for tag in tags: 68 logger.info(format_tag(tag)) 69 70 if tags_limited: 71 msg = ( 72 '...\n' 73 '[First {tag_limit} tags shown. Pass --verbose to show all.]' 74 ).format(tag_limit=tag_limit) 75 logger.info(msg) 76 77 78 class DebugCommand(Command): 79 """ 80 Display debug information. 
81 """ 82 83 usage = """ 84 %prog <options>""" 85 ignore_require_venv = True 86 87 def __init__(self, *args, **kw): 88 super(DebugCommand, self).__init__(*args, **kw) 89 90 cmd_opts = self.cmd_opts 91 cmdoptions.add_target_python_options(cmd_opts) 92 self.parser.insert_option_group(0, cmd_opts) 93 94 def run(self, options, args): 95 # type: (Values, List[Any]) -> int 96 logger.warning( 97 "This command is only meant for debugging. " 98 "Do not use this with automation for parsing and getting these " 99 "details, since the output and options of this command may " 100 "change without notice." 101 ) 102 show_value('pip version', get_pip_version()) 103 show_value('sys.version', sys.version) 104 show_value('sys.executable', sys.executable) 105 show_value('sys.getdefaultencoding', sys.getdefaultencoding()) 106 show_value('sys.getfilesystemencoding', sys.getfilesystemencoding()) 107 show_value( 108 'locale.getpreferredencoding', locale.getpreferredencoding(), 109 ) 110 show_value('sys.platform', sys.platform) 111 show_sys_implementation() 112 113 show_tags(options) 114 115 return SUCCESS 116 [end of src/pip/_internal/commands/debug.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py --- a/src/pip/_internal/commands/debug.py +++ b/src/pip/_internal/commands/debug.py @@ -5,8 +5,11 @@ import locale import logging +import os import sys +from pip._vendor.certifi import where + from pip._internal.cli import cmdoptions from pip._internal.cli.base_command import Command from pip._internal.cli.cmdoptions import make_target_python @@ -17,14 +20,14 @@ from pip._internal.wheel import format_tag if MYPY_CHECK_RUNNING: - from typing import Any, List + from typing import Any, List, Optional from optparse import Values logger = logging.getLogger(__name__) def show_value(name, value): - # type: (str, str) -> None + # type: (str, Optional[str]) -> None logger.info('{}: {}'.format(name, value)) @@ -75,6 +78,25 @@ logger.info(msg) +def ca_bundle_info(config): + levels = set() + for key, value in config.items(): + levels.add(key.split('.')[0]) + + if not levels: + return "Not specified" + + levels_that_override_global = ['install', 'wheel', 'download'] + global_overriding_level = [ + level for level in levels if level in levels_that_override_global + ] + if not global_overriding_level: + return 'global' + + levels.remove('global') + return ", ".join(levels) + + class DebugCommand(Command): """ Display debug information. @@ -90,6 +112,7 @@ cmd_opts = self.cmd_opts cmdoptions.add_target_python_options(cmd_opts) self.parser.insert_option_group(0, cmd_opts) + self.parser.config.load() def run(self, options, args): # type: (Values, List[Any]) -> int @@ -110,6 +133,11 @@ show_value('sys.platform', sys.platform) show_sys_implementation() + show_value("'cert' config value", ca_bundle_info(self.parser.config)) + show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE')) + show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE')) + show_value("pip._vendor.certifi.where()", where()) + show_tags(options) return SUCCESS
{"golden_diff": "diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py\n--- a/src/pip/_internal/commands/debug.py\n+++ b/src/pip/_internal/commands/debug.py\n@@ -5,8 +5,11 @@\n \n import locale\n import logging\n+import os\n import sys\n \n+from pip._vendor.certifi import where\n+\n from pip._internal.cli import cmdoptions\n from pip._internal.cli.base_command import Command\n from pip._internal.cli.cmdoptions import make_target_python\n@@ -17,14 +20,14 @@\n from pip._internal.wheel import format_tag\n \n if MYPY_CHECK_RUNNING:\n- from typing import Any, List\n+ from typing import Any, List, Optional\n from optparse import Values\n \n logger = logging.getLogger(__name__)\n \n \n def show_value(name, value):\n- # type: (str, str) -> None\n+ # type: (str, Optional[str]) -> None\n logger.info('{}: {}'.format(name, value))\n \n \n@@ -75,6 +78,25 @@\n logger.info(msg)\n \n \n+def ca_bundle_info(config):\n+ levels = set()\n+ for key, value in config.items():\n+ levels.add(key.split('.')[0])\n+\n+ if not levels:\n+ return \"Not specified\"\n+\n+ levels_that_override_global = ['install', 'wheel', 'download']\n+ global_overriding_level = [\n+ level for level in levels if level in levels_that_override_global\n+ ]\n+ if not global_overriding_level:\n+ return 'global'\n+\n+ levels.remove('global')\n+ return \", \".join(levels)\n+\n+\n class DebugCommand(Command):\n \"\"\"\n Display debug information.\n@@ -90,6 +112,7 @@\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n+ self.parser.config.load()\n \n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n@@ -110,6 +133,11 @@\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n \n+ show_value(\"'cert' config value\", ca_bundle_info(self.parser.config))\n+ show_value(\"REQUESTS_CA_BUNDLE\", os.environ.get('REQUESTS_CA_BUNDLE'))\n+ show_value(\"CURL_CA_BUNDLE\", os.environ.get('CURL_CA_BUNDLE'))\n+ show_value(\"pip._vendor.certifi.where()\", where())\n+\n show_tags(options)\n \n return SUCCESS\n", "issue": "Add SSL CA certificate information to `pip debug`\n**What's the problem this feature will solve?**\r\n\r\nAs described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. 
This makes it hard to debug user issues.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIn the output of `pip debug` we should include:\r\n\r\n* the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much\r\n* `os.environ.get('REQUESTS_CA_BUNDLE')`\r\n* `os.environ.get('CURL_CA_BUNDLE')`\r\n* `pip._vendor.certifi.where()`\r\n\r\nThis will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues.\r\n\r\n**Alternative Solutions**\r\n\r\nDo nothing.\r\n\r\n**Additional context**\r\n\r\n* #4459\r\n* #4919\r\n* #6335\r\n* #6720\r\n* #6915\n", "before_files": [{"content": "# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport locale\nimport logging\nimport sys\n\nfrom pip._internal.cli import cmdoptions\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.cmdoptions import make_target_python\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import get_pip_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.wheel import format_tag\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, List\n from optparse import Values\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_value(name, value):\n # type: (str, str) -> None\n logger.info('{}: {}'.format(name, value))\n\n\ndef show_sys_implementation():\n # type: () -> None\n logger.info('sys.implementation:')\n if hasattr(sys, 'implementation'):\n implementation = sys.implementation # type: ignore\n implementation_name = implementation.name\n else:\n implementation_name = ''\n\n with indent_log():\n show_value('name', implementation_name)\n\n\ndef show_tags(options):\n # type: (Values) -> None\n tag_limit = 10\n\n target_python = make_target_python(options)\n tags = target_python.get_tags()\n\n # Display the target options that were explicitly provided.\n formatted_target = target_python.format_given()\n suffix = ''\n if formatted_target:\n suffix = ' (target: {})'.format(formatted_target)\n\n msg = 'Compatible tags: {}{}'.format(len(tags), suffix)\n logger.info(msg)\n\n if options.verbose < 1 and len(tags) > tag_limit:\n tags_limited = True\n tags = tags[:tag_limit]\n else:\n tags_limited = False\n\n with indent_log():\n for tag in tags:\n logger.info(format_tag(tag))\n\n if tags_limited:\n msg = (\n '...\\n'\n '[First {tag_limit} tags shown. Pass --verbose to show all.]'\n ).format(tag_limit=tag_limit)\n logger.info(msg)\n\n\nclass DebugCommand(Command):\n \"\"\"\n Display debug information.\n \"\"\"\n\n usage = \"\"\"\n %prog <options>\"\"\"\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(DebugCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n logger.warning(\n \"This command is only meant for debugging. 
\"\n \"Do not use this with automation for parsing and getting these \"\n \"details, since the output and options of this command may \"\n \"change without notice.\"\n )\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n show_value('sys.getdefaultencoding', sys.getdefaultencoding())\n show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())\n show_value(\n 'locale.getpreferredencoding', locale.getpreferredencoding(),\n )\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n\n show_tags(options)\n\n return SUCCESS\n", "path": "src/pip/_internal/commands/debug.py"}]}
1,823
569
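To reproduce the same diagnosis outside of `pip debug`, the sources the patch reports can be inspected directly. The sketch below uses the public `certifi` package instead of pip's vendored copy and does not read pip's configuration files, so it omits the `'cert' config value` part of the patch.

```python
# Standalone sketch of the CA-bundle inspection added to `pip debug`:
# environment overrides plus the certifi-bundled certificate path.
import os

import certifi


def ca_bundle_sources() -> dict:
    return {
        "REQUESTS_CA_BUNDLE": os.environ.get("REQUESTS_CA_BUNDLE"),
        "CURL_CA_BUNDLE": os.environ.get("CURL_CA_BUNDLE"),
        "certifi.where()": certifi.where(),
    }


if __name__ == "__main__":
    for name, value in ca_bundle_sources().items():
        print(f"{name}: {value}")
```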
gh_patches_debug_4385
rasdani/github-patches
git_diff
spack__spack-18515
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SpecList has instances of mutable types as default __init__ arguments While debugging #18338 I stepped on this definition: https://github.com/spack/spack/blob/3701633937a80c56ce212cd77c82c082f705e7ad/lib/spack/spack/spec_list.py#L26-L30 that might cause unwanted interactions between different instances of `SpecList` if they use defaults for `reference` (and is difficult to read if default for `yaml_list` is used). This issue is a reminder to refactor this part of the code. </issue> <code> [start of lib/spack/spack/spec_list.py] 1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 import itertools 6 from six import string_types 7 8 import spack.variant 9 from spack.spec import Spec 10 from spack.error import SpackError 11 12 13 def spec_ordering_key(s): 14 if s.startswith('^'): 15 return 5 16 elif s.startswith('/'): 17 return 4 18 elif s.startswith('%'): 19 return 3 20 elif any(s.startswith(c) for c in '~-+@') or '=' in s: 21 return 2 22 else: 23 return 1 24 25 26 class SpecList(object): 27 28 def __init__(self, name='specs', yaml_list=[], reference={}): 29 self.name = name 30 self._reference = reference # TODO: Do we need defensive copy here? 31 32 # Validate yaml_list before assigning 33 if not all(isinstance(s, string_types) or isinstance(s, (list, dict)) 34 for s in yaml_list): 35 raise ValueError( 36 "yaml_list can contain only valid YAML types! Found:\n %s" 37 % [type(s) for s in yaml_list]) 38 self.yaml_list = yaml_list[:] 39 40 # Expansions can be expensive to compute and difficult to keep updated 41 # We cache results and invalidate when self.yaml_list changes 42 self._expanded_list = None 43 self._constraints = None 44 self._specs = None 45 46 @property 47 def specs_as_yaml_list(self): 48 if self._expanded_list is None: 49 self._expanded_list = self._expand_references(self.yaml_list) 50 return self._expanded_list 51 52 @property 53 def specs_as_constraints(self): 54 if self._constraints is None: 55 constraints = [] 56 for item in self.specs_as_yaml_list: 57 if isinstance(item, dict): # matrix of specs 58 constraints.extend(_expand_matrix_constraints(item)) 59 else: # individual spec 60 constraints.append([Spec(item)]) 61 self._constraints = constraints 62 63 return self._constraints 64 65 @property 66 def specs(self): 67 if self._specs is None: 68 specs = [] 69 # This could be slightly faster done directly from yaml_list, 70 # but this way is easier to maintain. 
71 for constraint_list in self.specs_as_constraints: 72 spec = constraint_list[0].copy() 73 for const in constraint_list[1:]: 74 spec.constrain(const) 75 specs.append(spec) 76 self._specs = specs 77 78 return self._specs 79 80 def add(self, spec): 81 self.yaml_list.append(str(spec)) 82 83 # expanded list can be updated without invalidation 84 if self._expanded_list is not None: 85 self._expanded_list.append(str(spec)) 86 87 # Invalidate cache variables when we change the list 88 self._constraints = None 89 self._specs = None 90 91 def remove(self, spec): 92 # Get spec to remove from list 93 remove = [s for s in self.yaml_list 94 if (isinstance(s, string_types) and not s.startswith('$')) 95 and Spec(s) == Spec(spec)] 96 if not remove: 97 msg = 'Cannot remove %s from SpecList %s\n' % (spec, self.name) 98 msg += 'Either %s is not in %s or %s is ' % (spec, self.name, spec) 99 msg += 'expanded from a matrix and cannot be removed directly.' 100 raise SpecListError(msg) 101 assert len(remove) == 1 102 self.yaml_list.remove(remove[0]) 103 104 # invalidate cache variables when we change the list 105 self._expanded_list = None 106 self._constraints = None 107 self._specs = None 108 109 def extend(self, other, copy_reference=True): 110 self.yaml_list.extend(other.yaml_list) 111 self._expanded_list = None 112 self._constraints = None 113 self._specs = None 114 115 if copy_reference: 116 self._reference = other._reference 117 118 def update_reference(self, reference): 119 self._reference = reference 120 self._expanded_list = None 121 self._constraints = None 122 self._specs = None 123 124 def _parse_reference(self, name): 125 sigil = '' 126 name = name[1:] 127 128 # Parse specs as constraints 129 if name.startswith('^') or name.startswith('%'): 130 sigil = name[0] 131 name = name[1:] 132 133 # Make sure the reference is valid 134 if name not in self._reference: 135 msg = 'SpecList %s refers to ' % self.name 136 msg += 'named list %s ' % name 137 msg += 'which does not appear in its reference dict' 138 raise UndefinedReferenceError(msg) 139 140 return (name, sigil) 141 142 def _expand_references(self, yaml): 143 if isinstance(yaml, list): 144 ret = [] 145 146 for item in yaml: 147 # if it's a reference, expand it 148 if isinstance(item, string_types) and item.startswith('$'): 149 # replace the reference and apply the sigil if needed 150 name, sigil = self._parse_reference(item) 151 referent = [ 152 _sigilify(item, sigil) 153 for item in self._reference[name].specs_as_yaml_list 154 ] 155 ret.extend(referent) 156 else: 157 # else just recurse 158 ret.append(self._expand_references(item)) 159 return ret 160 elif isinstance(yaml, dict): 161 # There can't be expansions in dicts 162 return dict((name, self._expand_references(val)) 163 for (name, val) in yaml.items()) 164 else: 165 # Strings are just returned 166 return yaml 167 168 def __len__(self): 169 return len(self.specs) 170 171 def __getitem__(self, key): 172 return self.specs[key] 173 174 175 def _expand_matrix_constraints(object, specify=True): 176 # recurse so we can handle nexted matrices 177 expanded_rows = [] 178 for row in object['matrix']: 179 new_row = [] 180 for r in row: 181 if isinstance(r, dict): 182 new_row.extend( 183 [[' '.join(c)] 184 for c in _expand_matrix_constraints(r, specify=False)]) 185 else: 186 new_row.append([r]) 187 expanded_rows.append(new_row) 188 189 excludes = object.get('exclude', []) # only compute once 190 sigil = object.get('sigil', '') 191 192 results = [] 193 for combo in itertools.product(*expanded_rows): 194 # 
Construct a combined spec to test against excludes 195 flat_combo = [constraint for list in combo for constraint in list] 196 ordered_combo = sorted(flat_combo, key=spec_ordering_key) 197 198 test_spec = Spec(' '.join(ordered_combo)) 199 # Abstract variants don't have normal satisfaction semantics 200 # Convert all variants to concrete types. 201 # This method is best effort, so all existing variants will be 202 # converted before any error is raised. 203 # Catch exceptions because we want to be able to operate on 204 # abstract specs without needing package information 205 try: 206 spack.variant.substitute_abstract_variants(test_spec) 207 except spack.variant.UnknownVariantError: 208 pass 209 if any(test_spec.satisfies(x) for x in excludes): 210 continue 211 212 if sigil: # add sigil if necessary 213 ordered_combo[0] = sigil + ordered_combo[0] 214 215 # Add to list of constraints 216 if specify: 217 results.append([Spec(x) for x in ordered_combo]) 218 else: 219 results.append(ordered_combo) 220 return results 221 222 223 def _sigilify(item, sigil): 224 if isinstance(item, dict): 225 if sigil: 226 item['sigil'] = sigil 227 return item 228 else: 229 return sigil + item 230 231 232 class SpecListError(SpackError): 233 """Error class for all errors related to SpecList objects.""" 234 235 236 class UndefinedReferenceError(SpecListError): 237 """Error class for undefined references in Spack stacks.""" 238 239 240 class InvalidSpecConstraintError(SpecListError): 241 """Error class for invalid spec constraints at concretize time.""" 242 [end of lib/spack/spack/spec_list.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/spack/spack/spec_list.py b/lib/spack/spack/spec_list.py --- a/lib/spack/spack/spec_list.py +++ b/lib/spack/spack/spec_list.py @@ -25,7 +25,11 @@ class SpecList(object): - def __init__(self, name='specs', yaml_list=[], reference={}): + def __init__(self, name='specs', yaml_list=None, reference=None): + # Normalize input arguments + yaml_list = yaml_list or [] + reference = reference or {} + self.name = name self._reference = reference # TODO: Do we need defensive copy here?
{"golden_diff": "diff --git a/lib/spack/spack/spec_list.py b/lib/spack/spack/spec_list.py\n--- a/lib/spack/spack/spec_list.py\n+++ b/lib/spack/spack/spec_list.py\n@@ -25,7 +25,11 @@\n \n class SpecList(object):\n \n- def __init__(self, name='specs', yaml_list=[], reference={}):\n+ def __init__(self, name='specs', yaml_list=None, reference=None):\n+ # Normalize input arguments\n+ yaml_list = yaml_list or []\n+ reference = reference or {}\n+\n self.name = name\n self._reference = reference # TODO: Do we need defensive copy here?\n", "issue": "SpecList has instances of mutable types as default __init__ arguments\nWhile debugging #18338 I stepped on this definition:\r\n\r\nhttps://github.com/spack/spack/blob/3701633937a80c56ce212cd77c82c082f705e7ad/lib/spack/spack/spec_list.py#L26-L30\r\n\r\nthat might cause unwanted interactions between different instances of `SpecList` if they use defaults for `reference` (and is difficult to read if default for `yaml_list` is used). This issue is a reminder to refactor this part of the code.\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\nimport itertools\nfrom six import string_types\n\nimport spack.variant\nfrom spack.spec import Spec\nfrom spack.error import SpackError\n\n\ndef spec_ordering_key(s):\n if s.startswith('^'):\n return 5\n elif s.startswith('/'):\n return 4\n elif s.startswith('%'):\n return 3\n elif any(s.startswith(c) for c in '~-+@') or '=' in s:\n return 2\n else:\n return 1\n\n\nclass SpecList(object):\n\n def __init__(self, name='specs', yaml_list=[], reference={}):\n self.name = name\n self._reference = reference # TODO: Do we need defensive copy here?\n\n # Validate yaml_list before assigning\n if not all(isinstance(s, string_types) or isinstance(s, (list, dict))\n for s in yaml_list):\n raise ValueError(\n \"yaml_list can contain only valid YAML types! 
Found:\\n %s\"\n % [type(s) for s in yaml_list])\n self.yaml_list = yaml_list[:]\n\n # Expansions can be expensive to compute and difficult to keep updated\n # We cache results and invalidate when self.yaml_list changes\n self._expanded_list = None\n self._constraints = None\n self._specs = None\n\n @property\n def specs_as_yaml_list(self):\n if self._expanded_list is None:\n self._expanded_list = self._expand_references(self.yaml_list)\n return self._expanded_list\n\n @property\n def specs_as_constraints(self):\n if self._constraints is None:\n constraints = []\n for item in self.specs_as_yaml_list:\n if isinstance(item, dict): # matrix of specs\n constraints.extend(_expand_matrix_constraints(item))\n else: # individual spec\n constraints.append([Spec(item)])\n self._constraints = constraints\n\n return self._constraints\n\n @property\n def specs(self):\n if self._specs is None:\n specs = []\n # This could be slightly faster done directly from yaml_list,\n # but this way is easier to maintain.\n for constraint_list in self.specs_as_constraints:\n spec = constraint_list[0].copy()\n for const in constraint_list[1:]:\n spec.constrain(const)\n specs.append(spec)\n self._specs = specs\n\n return self._specs\n\n def add(self, spec):\n self.yaml_list.append(str(spec))\n\n # expanded list can be updated without invalidation\n if self._expanded_list is not None:\n self._expanded_list.append(str(spec))\n\n # Invalidate cache variables when we change the list\n self._constraints = None\n self._specs = None\n\n def remove(self, spec):\n # Get spec to remove from list\n remove = [s for s in self.yaml_list\n if (isinstance(s, string_types) and not s.startswith('$'))\n and Spec(s) == Spec(spec)]\n if not remove:\n msg = 'Cannot remove %s from SpecList %s\\n' % (spec, self.name)\n msg += 'Either %s is not in %s or %s is ' % (spec, self.name, spec)\n msg += 'expanded from a matrix and cannot be removed directly.'\n raise SpecListError(msg)\n assert len(remove) == 1\n self.yaml_list.remove(remove[0])\n\n # invalidate cache variables when we change the list\n self._expanded_list = None\n self._constraints = None\n self._specs = None\n\n def extend(self, other, copy_reference=True):\n self.yaml_list.extend(other.yaml_list)\n self._expanded_list = None\n self._constraints = None\n self._specs = None\n\n if copy_reference:\n self._reference = other._reference\n\n def update_reference(self, reference):\n self._reference = reference\n self._expanded_list = None\n self._constraints = None\n self._specs = None\n\n def _parse_reference(self, name):\n sigil = ''\n name = name[1:]\n\n # Parse specs as constraints\n if name.startswith('^') or name.startswith('%'):\n sigil = name[0]\n name = name[1:]\n\n # Make sure the reference is valid\n if name not in self._reference:\n msg = 'SpecList %s refers to ' % self.name\n msg += 'named list %s ' % name\n msg += 'which does not appear in its reference dict'\n raise UndefinedReferenceError(msg)\n\n return (name, sigil)\n\n def _expand_references(self, yaml):\n if isinstance(yaml, list):\n ret = []\n\n for item in yaml:\n # if it's a reference, expand it\n if isinstance(item, string_types) and item.startswith('$'):\n # replace the reference and apply the sigil if needed\n name, sigil = self._parse_reference(item)\n referent = [\n _sigilify(item, sigil)\n for item in self._reference[name].specs_as_yaml_list\n ]\n ret.extend(referent)\n else:\n # else just recurse\n ret.append(self._expand_references(item))\n return ret\n elif isinstance(yaml, dict):\n # There can't be 
expansions in dicts\n return dict((name, self._expand_references(val))\n for (name, val) in yaml.items())\n else:\n # Strings are just returned\n return yaml\n\n def __len__(self):\n return len(self.specs)\n\n def __getitem__(self, key):\n return self.specs[key]\n\n\ndef _expand_matrix_constraints(object, specify=True):\n # recurse so we can handle nexted matrices\n expanded_rows = []\n for row in object['matrix']:\n new_row = []\n for r in row:\n if isinstance(r, dict):\n new_row.extend(\n [[' '.join(c)]\n for c in _expand_matrix_constraints(r, specify=False)])\n else:\n new_row.append([r])\n expanded_rows.append(new_row)\n\n excludes = object.get('exclude', []) # only compute once\n sigil = object.get('sigil', '')\n\n results = []\n for combo in itertools.product(*expanded_rows):\n # Construct a combined spec to test against excludes\n flat_combo = [constraint for list in combo for constraint in list]\n ordered_combo = sorted(flat_combo, key=spec_ordering_key)\n\n test_spec = Spec(' '.join(ordered_combo))\n # Abstract variants don't have normal satisfaction semantics\n # Convert all variants to concrete types.\n # This method is best effort, so all existing variants will be\n # converted before any error is raised.\n # Catch exceptions because we want to be able to operate on\n # abstract specs without needing package information\n try:\n spack.variant.substitute_abstract_variants(test_spec)\n except spack.variant.UnknownVariantError:\n pass\n if any(test_spec.satisfies(x) for x in excludes):\n continue\n\n if sigil: # add sigil if necessary\n ordered_combo[0] = sigil + ordered_combo[0]\n\n # Add to list of constraints\n if specify:\n results.append([Spec(x) for x in ordered_combo])\n else:\n results.append(ordered_combo)\n return results\n\n\ndef _sigilify(item, sigil):\n if isinstance(item, dict):\n if sigil:\n item['sigil'] = sigil\n return item\n else:\n return sigil + item\n\n\nclass SpecListError(SpackError):\n \"\"\"Error class for all errors related to SpecList objects.\"\"\"\n\n\nclass UndefinedReferenceError(SpecListError):\n \"\"\"Error class for undefined references in Spack stacks.\"\"\"\n\n\nclass InvalidSpecConstraintError(SpecListError):\n \"\"\"Error class for invalid spec constraints at concretize time.\"\"\"\n", "path": "lib/spack/spack/spec_list.py"}]}
num_tokens_prompt: 3,063
num_tokens_diff: 148

problem_id: gh_patches_debug_19632
source: rasdani/github-patches
task_type: git_diff
in_source_id: networkx__networkx-3628
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> jit json import/export I would consider the functions `jit_data` and `jit_graph` to be their inverses, so that ``` import networkx as nx nx.jit_graph(nx.jit_data(nx.Graph())) ``` works. Instead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be ``` import networkx as nx import json nx.jit_graph(json.loads(nx.jit_data(nx.Graph()))) ``` This is documented, but in my view unexpected and incoherent behavior. I'm pretty new to networkx and are not familiar with your design philosophy, but see the options * to add a clarifying note in the documentation OR * return the json object in `jit_data` OR * make use of the json.loads function in `jit_graph`. What are your opinions on this? I am willing to submit a PR (but probably it is just easier for you to make that oneline-commit, so that's also fine :)) </issue> <code> [start of networkx/readwrite/json_graph/jit.py] 1 # Copyright (C) 2011-2019 by 2 # Aric Hagberg <[email protected]> 3 # Dan Schult <[email protected]> 4 # Pieter Swart <[email protected]> 5 # All rights reserved. 6 # BSD license. 7 8 """ 9 Read and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON. 10 11 See the `JIT documentation`_ for more examples. 12 13 Format 14 ------ 15 var json = [ 16 { 17 "id": "aUniqueIdentifier", 18 "name": "usually a nodes name", 19 "data": { 20 "some key": "some value", 21 "some other key": "some other value" 22 }, 23 "adjacencies": [ 24 { 25 nodeTo:"aNodeId", 26 data: {} //put whatever you want here 27 }, 28 'other adjacencies go here...' 29 }, 30 31 'other nodes go here...' 32 ]; 33 .. _JIT documentation: http://thejit.org 34 """ 35 36 import json 37 import networkx as nx 38 from networkx.utils.decorators import not_implemented_for 39 40 __all__ = ['jit_graph', 'jit_data'] 41 42 43 def jit_graph(data, create_using=None): 44 """Read a graph from JIT JSON. 45 46 Parameters 47 ---------- 48 data : JSON Graph Object 49 50 create_using : Networkx Graph, optional (default: Graph()) 51 Return graph of this type. The provided instance will be cleared. 52 53 Returns 54 ------- 55 G : NetworkX Graph built from create_using if provided. 56 """ 57 if create_using is None: 58 G = nx.Graph() 59 else: 60 G = create_using 61 G.clear() 62 63 for node in data: 64 G.add_node(node['id'], **node['data']) 65 if node.get('adjacencies') is not None: 66 for adj in node['adjacencies']: 67 G.add_edge(node['id'], adj['nodeTo'], **adj['data']) 68 return G 69 70 71 @not_implemented_for('multigraph') 72 def jit_data(G, indent=None): 73 """Returns data in JIT JSON format. 74 75 Parameters 76 ---------- 77 G : NetworkX Graph 78 79 indent: optional, default=None 80 If indent is a non-negative integer, then JSON array elements and object 81 members will be pretty-printed with that indent level. An indent level 82 of 0, or negative, will only insert newlines. None (the default) selects 83 the most compact representation. 
84 85 Returns 86 ------- 87 data: JIT JSON string 88 """ 89 json_graph = [] 90 for node in G.nodes(): 91 json_node = { 92 "id": node, 93 "name": node 94 } 95 # node data 96 json_node["data"] = G.nodes[node] 97 # adjacencies 98 if G[node]: 99 json_node["adjacencies"] = [] 100 for neighbour in G[node]: 101 adjacency = { 102 "nodeTo": neighbour, 103 } 104 # adjacency data 105 adjacency["data"] = G.edges[node, neighbour] 106 json_node["adjacencies"].append(adjacency) 107 json_graph.append(json_node) 108 return json.dumps(json_graph, indent=indent) 109 [end of networkx/readwrite/json_graph/jit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py --- a/networkx/readwrite/json_graph/jit.py +++ b/networkx/readwrite/json_graph/jit.py @@ -60,6 +60,9 @@ G = create_using G.clear() + if nx.utils.is_string_like(data): + data = json.loads(data) + for node in data: G.add_node(node['id'], **node['data']) if node.get('adjacencies') is not None: @@ -77,10 +80,10 @@ G : NetworkX Graph indent: optional, default=None - If indent is a non-negative integer, then JSON array elements and object - members will be pretty-printed with that indent level. An indent level - of 0, or negative, will only insert newlines. None (the default) selects - the most compact representation. + If indent is a non-negative integer, then JSON array elements and + object members will be pretty-printed with that indent level. + An indent level of 0, or negative, will only insert newlines. + None (the default) selects the most compact representation. Returns -------
{"golden_diff": "diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py\n--- a/networkx/readwrite/json_graph/jit.py\n+++ b/networkx/readwrite/json_graph/jit.py\n@@ -60,6 +60,9 @@\n G = create_using\n G.clear()\n \n+ if nx.utils.is_string_like(data):\n+ data = json.loads(data)\n+\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n@@ -77,10 +80,10 @@\n G : NetworkX Graph\n \n indent: optional, default=None\n- If indent is a non-negative integer, then JSON array elements and object\n- members will be pretty-printed with that indent level. An indent level\n- of 0, or negative, will only insert newlines. None (the default) selects\n- the most compact representation.\n+ If indent is a non-negative integer, then JSON array elements and\n+ object members will be pretty-printed with that indent level.\n+ An indent level of 0, or negative, will only insert newlines.\n+ None (the default) selects the most compact representation.\n \n Returns\n -------\n", "issue": "jit json import/export\nI would consider the functions `jit_data` and `jit_graph` to be their inverses, so that\r\n```\r\nimport networkx as nx\r\nnx.jit_graph(nx.jit_data(nx.Graph()))\r\n```\r\nworks.\r\n\r\nInstead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be\r\n```\r\nimport networkx as nx\r\nimport json\r\nnx.jit_graph(json.loads(nx.jit_data(nx.Graph())))\r\n```\r\n\r\nThis is documented, but in my view unexpected and incoherent behavior. I'm pretty new to networkx and are not familiar with your design philosophy, but see the options\r\n* to add a clarifying note in the documentation OR\r\n* return the json object in `jit_data` OR\r\n* make use of the json.loads function in `jit_graph`.\r\n\r\nWhat are your opinions on this?\r\nI am willing to submit a PR (but probably it is just easier for you to make that oneline-commit, so that's also fine :))\n", "before_files": [{"content": "# Copyright (C) 2011-2019 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\n\"\"\"\nRead and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.\n\nSee the `JIT documentation`_ for more examples.\n\nFormat\n------\nvar json = [\n {\n \"id\": \"aUniqueIdentifier\",\n \"name\": \"usually a nodes name\",\n \"data\": {\n \"some key\": \"some value\",\n \"some other key\": \"some other value\"\n },\n \"adjacencies\": [\n {\n nodeTo:\"aNodeId\",\n data: {} //put whatever you want here\n },\n 'other adjacencies go here...'\n },\n\n 'other nodes go here...'\n];\n.. _JIT documentation: http://thejit.org\n\"\"\"\n\nimport json\nimport networkx as nx\nfrom networkx.utils.decorators import not_implemented_for\n\n__all__ = ['jit_graph', 'jit_data']\n\n\ndef jit_graph(data, create_using=None):\n \"\"\"Read a graph from JIT JSON.\n\n Parameters\n ----------\n data : JSON Graph Object\n\n create_using : Networkx Graph, optional (default: Graph())\n Return graph of this type. 
The provided instance will be cleared.\n\n Returns\n -------\n G : NetworkX Graph built from create_using if provided.\n \"\"\"\n if create_using is None:\n G = nx.Graph()\n else:\n G = create_using\n G.clear()\n\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n for adj in node['adjacencies']:\n G.add_edge(node['id'], adj['nodeTo'], **adj['data'])\n return G\n\n\n@not_implemented_for('multigraph')\ndef jit_data(G, indent=None):\n \"\"\"Returns data in JIT JSON format.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n indent: optional, default=None\n If indent is a non-negative integer, then JSON array elements and object\n members will be pretty-printed with that indent level. An indent level\n of 0, or negative, will only insert newlines. None (the default) selects\n the most compact representation.\n\n Returns\n -------\n data: JIT JSON string\n \"\"\"\n json_graph = []\n for node in G.nodes():\n json_node = {\n \"id\": node,\n \"name\": node\n }\n # node data\n json_node[\"data\"] = G.nodes[node]\n # adjacencies\n if G[node]:\n json_node[\"adjacencies\"] = []\n for neighbour in G[node]:\n adjacency = {\n \"nodeTo\": neighbour,\n }\n # adjacency data\n adjacency[\"data\"] = G.edges[node, neighbour]\n json_node[\"adjacencies\"].append(adjacency)\n json_graph.append(json_node)\n return json.dumps(json_graph, indent=indent)\n", "path": "networkx/readwrite/json_graph/jit.py"}]}
num_tokens_prompt: 1,689
num_tokens_diff: 281

problem_id: gh_patches_debug_12095
source: rasdani/github-patches
task_type: git_diff
in_source_id: spyder-ide__spyder-3550
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Console/Editor lose focus when auto-connected to help ## Description of your problem 1. Open spyder 2. Enable automatic connection to help in settings 3. Type in editor or console until the help connects, either by function(, or function? in the console Expected behaviour is for the help for that function to show and I can continue typing my code. What happens is that when the help activates, where I'm typing loses focus and I need to click back into the editor or console window to continue typing. The only way to write code in the editor without constantly being interrupted is to disable the help connection, which is annoying because thats one of my favourite parts of spyder. ## Versions and main components - Spyder Version: 3.0.0b4 - Python Version:3.5.2 - Operating system: Mac OSX 10.11.5 ## Dependencies jedi >=0.8.1 : 0.9.0 (OK) matplotlib >=1.0 : 1.5.1 (OK) nbconvert >=4.0 : 4.2.0 (OK) numpy >=1.7 : 1.11.1 (OK) pandas >=0.13.1 : 0.18.1 (OK) pep8 >=0.6 : 1.7.0 (OK) psutil >=0.3 : 4.3.0 (OK) pyflakes >=0.6.0 : 1.2.3 (OK) pygments >=2.0 : 2.1.3 (OK) qtconsole >=4.2.0: 4.2.1 (OK) rope >=0.9.4 : 0.9.4-1 (OK) sphinx >=0.6.6 : 1.4.1 (OK) sympy >=0.7.3 : 1.0 (OK) </issue> <code> [start of spyder/widgets/browser.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright © Spyder Project Contributors 4 # Licensed under the terms of the MIT License 5 # (see spyder/__init__.py for details) 6 7 """Simple web browser widget""" 8 9 # Standard library imports 10 import sys 11 12 # Third party imports 13 from qtpy.QtCore import QUrl, Signal, Slot 14 from qtpy.QtWidgets import (QFrame, QHBoxLayout, QLabel, QProgressBar, QMenu, 15 QVBoxLayout, QWidget) 16 from qtpy.QtWebEngineWidgets import (QWebEnginePage, QWebEngineSettings, 17 QWebEngineView, WEBENGINE) 18 19 # Local imports 20 from spyder.config.base import _, DEV 21 from spyder.py3compat import is_text_string, to_text_string 22 from spyder.utils.qthelpers import (action2button, add_actions, 23 create_action, create_toolbutton) 24 from spyder.utils import icon_manager as ima 25 from spyder.widgets.comboboxes import UrlComboBox 26 from spyder.widgets.findreplace import FindReplace 27 28 29 class WebPage(QWebEnginePage): 30 """ 31 Web page subclass to manage hyperlinks for WebEngine 32 33 Note: This can't be used for WebKit because the 34 acceptNavigationRequest method has a different 35 functionality for it. 36 """ 37 linkClicked = Signal(QUrl) 38 39 def acceptNavigationRequest(self, url, navigation_type, isMainFrame): 40 """ 41 Overloaded method to handle links ourselves 42 """ 43 if navigation_type == QWebEnginePage.NavigationTypeLinkClicked: 44 self.linkClicked.emit(url) 45 return False 46 return True 47 48 49 class WebView(QWebEngineView): 50 """Web view""" 51 def __init__(self, parent): 52 QWebEngineView.__init__(self, parent) 53 self.zoom_factor = 1. 
54 self.zoom_out_action = create_action(self, _("Zoom out"), 55 icon=ima.icon('zoom_out'), 56 triggered=self.zoom_out) 57 self.zoom_in_action = create_action(self, _("Zoom in"), 58 icon=ima.icon('zoom_in'), 59 triggered=self.zoom_in) 60 if WEBENGINE: 61 web_page = WebPage(self) 62 self.setPage(web_page) 63 64 def find_text(self, text, changed=True, 65 forward=True, case=False, words=False, 66 regexp=False): 67 """Find text""" 68 if not WEBENGINE: 69 findflag = QWebEnginePage.FindWrapsAroundDocument 70 else: 71 findflag = 0 72 73 if not forward: 74 findflag = findflag | QWebEnginePage.FindBackward 75 if case: 76 findflag = findflag | QWebEnginePage.FindCaseSensitively 77 78 return self.findText(text, QWebEnginePage.FindFlags(findflag)) 79 80 def get_selected_text(self): 81 """Return text selected by current text cursor""" 82 return self.selectedText() 83 84 def set_font(self, font, fixed_font=None): 85 settings = self.page().settings() 86 for fontfamily in (settings.StandardFont, settings.SerifFont, 87 settings.SansSerifFont, settings.CursiveFont, 88 settings.FantasyFont): 89 settings.setFontFamily(fontfamily, font.family()) 90 if fixed_font is not None: 91 settings.setFontFamily(settings.FixedFont, fixed_font.family()) 92 size = font.pointSize() 93 settings.setFontSize(settings.DefaultFontSize, size) 94 settings.setFontSize(settings.DefaultFixedFontSize, size) 95 96 def apply_zoom_factor(self): 97 """Apply zoom factor""" 98 if hasattr(self, 'setZoomFactor'): 99 # Assuming Qt >=v4.5 100 self.setZoomFactor(self.zoom_factor) 101 else: 102 # Qt v4.4 103 self.setTextSizeMultiplier(self.zoom_factor) 104 105 def set_zoom_factor(self, zoom_factor): 106 """Set zoom factor""" 107 self.zoom_factor = zoom_factor 108 self.apply_zoom_factor() 109 110 def get_zoom_factor(self): 111 """Return zoom factor""" 112 return self.zoom_factor 113 114 @Slot() 115 def zoom_out(self): 116 """Zoom out""" 117 self.zoom_factor = max(.1, self.zoom_factor-.1) 118 self.apply_zoom_factor() 119 120 @Slot() 121 def zoom_in(self): 122 """Zoom in""" 123 self.zoom_factor += .1 124 self.apply_zoom_factor() 125 126 #------ QWebEngineView API ------------------------------------------------------- 127 def createWindow(self, webwindowtype): 128 import webbrowser 129 webbrowser.open(to_text_string(self.url().toString())) 130 131 def contextMenuEvent(self, event): 132 menu = QMenu(self) 133 actions = [self.pageAction(QWebEnginePage.Back), 134 self.pageAction(QWebEnginePage.Forward), None, 135 self.pageAction(QWebEnginePage.SelectAll), 136 self.pageAction(QWebEnginePage.Copy), None, 137 self.zoom_in_action, self.zoom_out_action] 138 if DEV and not WEBENGINE: 139 settings = self.page().settings() 140 settings.setAttribute(QWebEngineSettings.DeveloperExtrasEnabled, True) 141 actions += [None, self.pageAction(QWebEnginePage.InspectElement)] 142 add_actions(menu, actions) 143 menu.popup(event.globalPos()) 144 event.accept() 145 146 147 class WebBrowser(QWidget): 148 """ 149 Web browser widget 150 """ 151 def __init__(self, parent=None): 152 QWidget.__init__(self, parent) 153 154 self.home_url = None 155 156 self.webview = WebView(self) 157 self.webview.loadFinished.connect(self.load_finished) 158 self.webview.titleChanged.connect(self.setWindowTitle) 159 self.webview.urlChanged.connect(self.url_changed) 160 161 home_button = create_toolbutton(self, icon=ima.icon('home'), 162 tip=_("Home"), 163 triggered=self.go_home) 164 165 zoom_out_button = action2button(self.webview.zoom_out_action) 166 zoom_in_button = 
action2button(self.webview.zoom_in_action) 167 168 pageact2btn = lambda prop: action2button(self.webview.pageAction(prop), 169 parent=self.webview) 170 refresh_button = pageact2btn(QWebEnginePage.Reload) 171 stop_button = pageact2btn(QWebEnginePage.Stop) 172 previous_button = pageact2btn(QWebEnginePage.Back) 173 next_button = pageact2btn(QWebEnginePage.Forward) 174 175 stop_button.setEnabled(False) 176 self.webview.loadStarted.connect(lambda: stop_button.setEnabled(True)) 177 self.webview.loadFinished.connect(lambda: stop_button.setEnabled(False)) 178 179 progressbar = QProgressBar(self) 180 progressbar.setTextVisible(False) 181 progressbar.hide() 182 self.webview.loadStarted.connect(progressbar.show) 183 self.webview.loadProgress.connect(progressbar.setValue) 184 self.webview.loadFinished.connect(lambda _state: progressbar.hide()) 185 186 label = QLabel(self.get_label()) 187 188 self.url_combo = UrlComboBox(self) 189 self.url_combo.valid.connect(self.url_combo_activated) 190 if not WEBENGINE: 191 self.webview.iconChanged.connect(self.icon_changed) 192 193 self.find_widget = FindReplace(self) 194 self.find_widget.set_editor(self.webview) 195 self.find_widget.hide() 196 197 find_button = create_toolbutton(self, icon=ima.icon('find'), 198 tip=_("Find text"), 199 toggled=self.toggle_find_widget) 200 self.find_widget.visibility_changed.connect(find_button.setChecked) 201 202 hlayout = QHBoxLayout() 203 for widget in (previous_button, next_button, home_button, find_button, 204 label, self.url_combo, zoom_out_button, zoom_in_button, 205 refresh_button, progressbar, stop_button): 206 hlayout.addWidget(widget) 207 208 layout = QVBoxLayout() 209 layout.addLayout(hlayout) 210 layout.addWidget(self.webview) 211 layout.addWidget(self.find_widget) 212 self.setLayout(layout) 213 214 def get_label(self): 215 """Return address label text""" 216 return _("Address:") 217 218 def set_home_url(self, text): 219 """Set home URL""" 220 self.home_url = QUrl(text) 221 222 def set_url(self, url): 223 """Set current URL""" 224 self.url_changed(url) 225 self.go_to(url) 226 227 def go_to(self, url_or_text): 228 """Go to page *address*""" 229 if is_text_string(url_or_text): 230 url = QUrl(url_or_text) 231 else: 232 url = url_or_text 233 self.webview.load(url) 234 235 @Slot() 236 def go_home(self): 237 """Go to home page""" 238 if self.home_url is not None: 239 self.set_url(self.home_url) 240 241 def text_to_url(self, text): 242 """Convert text address into QUrl object""" 243 return QUrl(text) 244 245 def url_combo_activated(self, valid): 246 """Load URL from combo box first item""" 247 text = to_text_string(self.url_combo.currentText()) 248 self.go_to(self.text_to_url(text)) 249 250 def load_finished(self, ok): 251 if not ok: 252 self.webview.setHtml(_("Unable to load page")) 253 254 def url_to_text(self, url): 255 """Convert QUrl object to displayed text in combo box""" 256 return url.toString() 257 258 def url_changed(self, url): 259 """Displayed URL has changed -> updating URL combo box""" 260 self.url_combo.add_text(self.url_to_text(url)) 261 262 def icon_changed(self): 263 self.url_combo.setItemIcon(self.url_combo.currentIndex(), 264 self.webview.icon()) 265 self.setWindowIcon(self.webview.icon()) 266 267 @Slot(bool) 268 def toggle_find_widget(self, state): 269 if state: 270 self.find_widget.show() 271 else: 272 self.find_widget.hide() 273 274 275 class FrameWebView(QFrame): 276 """ 277 Framed QWebEngineView for UI consistency in Spyder. 
278 """ 279 linkClicked = Signal(QUrl) 280 281 def __init__(self, parent): 282 QFrame.__init__(self, parent) 283 284 self._webview = WebView(self) 285 286 layout = QHBoxLayout() 287 layout.addWidget(self._webview) 288 layout.setContentsMargins(0, 0, 0, 0) 289 self.setLayout(layout) 290 291 self.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken) 292 293 if WEBENGINE: 294 self._webview.page().linkClicked.connect(self.linkClicked) 295 else: 296 self._webview.linkClicked.connect(self.linkClicked) 297 298 def __getattr__(self, name): 299 return getattr(self._webview, name) 300 301 @property 302 def web_widget(self): 303 return self._webview 304 305 306 def test(): 307 """Run web browser""" 308 from spyder.utils.qthelpers import qapplication 309 app = qapplication(test_time=8) 310 widget = WebBrowser() 311 widget.show() 312 widget.set_home_url('http://www.google.com/') 313 widget.go_home() 314 sys.exit(app.exec_()) 315 316 317 if __name__ == '__main__': 318 test() 319 [end of spyder/widgets/browser.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/spyder/widgets/browser.py b/spyder/widgets/browser.py --- a/spyder/widgets/browser.py +++ b/spyder/widgets/browser.py @@ -142,7 +142,22 @@ add_actions(menu, actions) menu.popup(event.globalPos()) event.accept() - + + def setHtml(self, html, baseUrl=QUrl()): + """ + Reimplement Qt method to prevent WebEngine to steal focus + when setting html on the page + + Solution taken from + https://bugreports.qt.io/browse/QTBUG-52999 + """ + if WEBENGINE: + self.setEnabled(False) + super(WebView, self).setHtml(html, baseUrl) + self.setEnabled(True) + else: + super(WebView, self).setHtml(html, baseUrl) + class WebBrowser(QWidget): """
{"golden_diff": "diff --git a/spyder/widgets/browser.py b/spyder/widgets/browser.py\n--- a/spyder/widgets/browser.py\n+++ b/spyder/widgets/browser.py\n@@ -142,7 +142,22 @@\n add_actions(menu, actions)\r\n menu.popup(event.globalPos())\r\n event.accept()\r\n- \r\n+\r\n+ def setHtml(self, html, baseUrl=QUrl()):\r\n+ \"\"\"\r\n+ Reimplement Qt method to prevent WebEngine to steal focus\r\n+ when setting html on the page\r\n+\r\n+ Solution taken from\r\n+ https://bugreports.qt.io/browse/QTBUG-52999\r\n+ \"\"\"\r\n+ if WEBENGINE:\r\n+ self.setEnabled(False)\r\n+ super(WebView, self).setHtml(html, baseUrl)\r\n+ self.setEnabled(True)\r\n+ else:\r\n+ super(WebView, self).setHtml(html, baseUrl)\r\n+\r\n \r\n class WebBrowser(QWidget):\r\n \"\"\"\n", "issue": "Console/Editor lose focus when auto-connected to help\n## Description of your problem\n1. Open spyder\n2. Enable automatic connection to help in settings\n3. Type in editor or console until the help connects, either by function(, or function? in the console\n\nExpected behaviour is for the help for that function to show and I can continue typing my code.\nWhat happens is that when the help activates, where I'm typing loses focus and I need to click back into the editor or console window to continue typing. The only way to write code in the editor without constantly being interrupted is to disable the help connection, which is annoying because thats one of my favourite parts of spyder.\n## Versions and main components\n- Spyder Version: 3.0.0b4\n- Python Version:3.5.2\n- Operating system: Mac OSX 10.11.5\n## Dependencies\n\njedi >=0.8.1 : 0.9.0 (OK)\nmatplotlib >=1.0 : 1.5.1 (OK)\nnbconvert >=4.0 : 4.2.0 (OK)\nnumpy >=1.7 : 1.11.1 (OK)\npandas >=0.13.1 : 0.18.1 (OK)\npep8 >=0.6 : 1.7.0 (OK)\npsutil >=0.3 : 4.3.0 (OK)\npyflakes >=0.6.0 : 1.2.3 (OK)\npygments >=2.0 : 2.1.3 (OK)\nqtconsole >=4.2.0: 4.2.1 (OK)\nrope >=0.9.4 : 0.9.4-1 (OK)\nsphinx >=0.6.6 : 1.4.1 (OK)\nsympy >=0.7.3 : 1.0 (OK)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright \u00a9 Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n\"\"\"Simple web browser widget\"\"\"\r\n\r\n# Standard library imports\r\nimport sys\r\n\r\n# Third party imports\r\nfrom qtpy.QtCore import QUrl, Signal, Slot\r\nfrom qtpy.QtWidgets import (QFrame, QHBoxLayout, QLabel, QProgressBar, QMenu,\r\n QVBoxLayout, QWidget)\r\nfrom qtpy.QtWebEngineWidgets import (QWebEnginePage, QWebEngineSettings,\r\n QWebEngineView, WEBENGINE)\r\n\r\n# Local imports\r\nfrom spyder.config.base import _, DEV\r\nfrom spyder.py3compat import is_text_string, to_text_string\r\nfrom spyder.utils.qthelpers import (action2button, add_actions,\r\n create_action, create_toolbutton)\r\nfrom spyder.utils import icon_manager as ima\r\nfrom spyder.widgets.comboboxes import UrlComboBox\r\nfrom spyder.widgets.findreplace import FindReplace\r\n\r\n\r\nclass WebPage(QWebEnginePage):\r\n \"\"\"\r\n Web page subclass to manage hyperlinks for WebEngine\r\n\r\n Note: This can't be used for WebKit because the\r\n acceptNavigationRequest method has a different\r\n functionality for it.\r\n \"\"\"\r\n linkClicked = Signal(QUrl)\r\n\r\n def acceptNavigationRequest(self, url, navigation_type, isMainFrame):\r\n \"\"\"\r\n Overloaded method to handle links ourselves\r\n \"\"\"\r\n if navigation_type == QWebEnginePage.NavigationTypeLinkClicked:\r\n self.linkClicked.emit(url)\r\n return False\r\n return True\r\n\r\n\r\nclass WebView(QWebEngineView):\r\n 
\"\"\"Web view\"\"\"\r\n def __init__(self, parent):\r\n QWebEngineView.__init__(self, parent)\r\n self.zoom_factor = 1.\r\n self.zoom_out_action = create_action(self, _(\"Zoom out\"),\r\n icon=ima.icon('zoom_out'),\r\n triggered=self.zoom_out)\r\n self.zoom_in_action = create_action(self, _(\"Zoom in\"),\r\n icon=ima.icon('zoom_in'),\r\n triggered=self.zoom_in)\r\n if WEBENGINE:\r\n web_page = WebPage(self)\r\n self.setPage(web_page)\r\n\r\n def find_text(self, text, changed=True,\r\n forward=True, case=False, words=False,\r\n regexp=False):\r\n \"\"\"Find text\"\"\"\r\n if not WEBENGINE:\r\n findflag = QWebEnginePage.FindWrapsAroundDocument\r\n else:\r\n findflag = 0\r\n\r\n if not forward:\r\n findflag = findflag | QWebEnginePage.FindBackward\r\n if case:\r\n findflag = findflag | QWebEnginePage.FindCaseSensitively\r\n\r\n return self.findText(text, QWebEnginePage.FindFlags(findflag))\r\n\r\n def get_selected_text(self):\r\n \"\"\"Return text selected by current text cursor\"\"\"\r\n return self.selectedText()\r\n \r\n def set_font(self, font, fixed_font=None):\r\n settings = self.page().settings()\r\n for fontfamily in (settings.StandardFont, settings.SerifFont,\r\n settings.SansSerifFont, settings.CursiveFont,\r\n settings.FantasyFont):\r\n settings.setFontFamily(fontfamily, font.family())\r\n if fixed_font is not None:\r\n settings.setFontFamily(settings.FixedFont, fixed_font.family())\r\n size = font.pointSize()\r\n settings.setFontSize(settings.DefaultFontSize, size)\r\n settings.setFontSize(settings.DefaultFixedFontSize, size)\r\n \r\n def apply_zoom_factor(self):\r\n \"\"\"Apply zoom factor\"\"\"\r\n if hasattr(self, 'setZoomFactor'):\r\n # Assuming Qt >=v4.5\r\n self.setZoomFactor(self.zoom_factor)\r\n else:\r\n # Qt v4.4\r\n self.setTextSizeMultiplier(self.zoom_factor)\r\n \r\n def set_zoom_factor(self, zoom_factor):\r\n \"\"\"Set zoom factor\"\"\"\r\n self.zoom_factor = zoom_factor\r\n self.apply_zoom_factor()\r\n \r\n def get_zoom_factor(self):\r\n \"\"\"Return zoom factor\"\"\"\r\n return self.zoom_factor\r\n\r\n @Slot()\r\n def zoom_out(self):\r\n \"\"\"Zoom out\"\"\"\r\n self.zoom_factor = max(.1, self.zoom_factor-.1)\r\n self.apply_zoom_factor()\r\n\r\n @Slot()\r\n def zoom_in(self):\r\n \"\"\"Zoom in\"\"\"\r\n self.zoom_factor += .1\r\n self.apply_zoom_factor()\r\n \r\n #------ QWebEngineView API -------------------------------------------------------\r\n def createWindow(self, webwindowtype):\r\n import webbrowser\r\n webbrowser.open(to_text_string(self.url().toString()))\r\n \r\n def contextMenuEvent(self, event):\r\n menu = QMenu(self)\r\n actions = [self.pageAction(QWebEnginePage.Back),\r\n self.pageAction(QWebEnginePage.Forward), None,\r\n self.pageAction(QWebEnginePage.SelectAll),\r\n self.pageAction(QWebEnginePage.Copy), None,\r\n self.zoom_in_action, self.zoom_out_action]\r\n if DEV and not WEBENGINE:\r\n settings = self.page().settings()\r\n settings.setAttribute(QWebEngineSettings.DeveloperExtrasEnabled, True)\r\n actions += [None, self.pageAction(QWebEnginePage.InspectElement)]\r\n add_actions(menu, actions)\r\n menu.popup(event.globalPos())\r\n event.accept()\r\n \r\n\r\nclass WebBrowser(QWidget):\r\n \"\"\"\r\n Web browser widget\r\n \"\"\"\r\n def __init__(self, parent=None):\r\n QWidget.__init__(self, parent)\r\n \r\n self.home_url = None\r\n \r\n self.webview = WebView(self)\r\n self.webview.loadFinished.connect(self.load_finished)\r\n self.webview.titleChanged.connect(self.setWindowTitle)\r\n self.webview.urlChanged.connect(self.url_changed)\r\n \r\n 
home_button = create_toolbutton(self, icon=ima.icon('home'),\r\n tip=_(\"Home\"),\r\n triggered=self.go_home)\r\n \r\n zoom_out_button = action2button(self.webview.zoom_out_action)\r\n zoom_in_button = action2button(self.webview.zoom_in_action)\r\n \r\n pageact2btn = lambda prop: action2button(self.webview.pageAction(prop),\r\n parent=self.webview)\r\n refresh_button = pageact2btn(QWebEnginePage.Reload)\r\n stop_button = pageact2btn(QWebEnginePage.Stop)\r\n previous_button = pageact2btn(QWebEnginePage.Back)\r\n next_button = pageact2btn(QWebEnginePage.Forward)\r\n \r\n stop_button.setEnabled(False)\r\n self.webview.loadStarted.connect(lambda: stop_button.setEnabled(True))\r\n self.webview.loadFinished.connect(lambda: stop_button.setEnabled(False))\r\n \r\n progressbar = QProgressBar(self)\r\n progressbar.setTextVisible(False)\r\n progressbar.hide()\r\n self.webview.loadStarted.connect(progressbar.show)\r\n self.webview.loadProgress.connect(progressbar.setValue)\r\n self.webview.loadFinished.connect(lambda _state: progressbar.hide())\r\n\r\n label = QLabel(self.get_label())\r\n\r\n self.url_combo = UrlComboBox(self)\r\n self.url_combo.valid.connect(self.url_combo_activated)\r\n if not WEBENGINE:\r\n self.webview.iconChanged.connect(self.icon_changed)\r\n\r\n self.find_widget = FindReplace(self)\r\n self.find_widget.set_editor(self.webview)\r\n self.find_widget.hide()\r\n\r\n find_button = create_toolbutton(self, icon=ima.icon('find'),\r\n tip=_(\"Find text\"),\r\n toggled=self.toggle_find_widget)\r\n self.find_widget.visibility_changed.connect(find_button.setChecked)\r\n\r\n hlayout = QHBoxLayout()\r\n for widget in (previous_button, next_button, home_button, find_button,\r\n label, self.url_combo, zoom_out_button, zoom_in_button,\r\n refresh_button, progressbar, stop_button):\r\n hlayout.addWidget(widget)\r\n \r\n layout = QVBoxLayout()\r\n layout.addLayout(hlayout)\r\n layout.addWidget(self.webview)\r\n layout.addWidget(self.find_widget)\r\n self.setLayout(layout)\r\n \r\n def get_label(self):\r\n \"\"\"Return address label text\"\"\"\r\n return _(\"Address:\")\r\n \r\n def set_home_url(self, text):\r\n \"\"\"Set home URL\"\"\"\r\n self.home_url = QUrl(text)\r\n \r\n def set_url(self, url):\r\n \"\"\"Set current URL\"\"\"\r\n self.url_changed(url)\r\n self.go_to(url)\r\n \r\n def go_to(self, url_or_text):\r\n \"\"\"Go to page *address*\"\"\"\r\n if is_text_string(url_or_text):\r\n url = QUrl(url_or_text)\r\n else:\r\n url = url_or_text\r\n self.webview.load(url)\r\n\r\n @Slot()\r\n def go_home(self):\r\n \"\"\"Go to home page\"\"\"\r\n if self.home_url is not None:\r\n self.set_url(self.home_url)\r\n \r\n def text_to_url(self, text):\r\n \"\"\"Convert text address into QUrl object\"\"\"\r\n return QUrl(text)\r\n \r\n def url_combo_activated(self, valid):\r\n \"\"\"Load URL from combo box first item\"\"\"\r\n text = to_text_string(self.url_combo.currentText())\r\n self.go_to(self.text_to_url(text))\r\n \r\n def load_finished(self, ok):\r\n if not ok:\r\n self.webview.setHtml(_(\"Unable to load page\"))\r\n \r\n def url_to_text(self, url):\r\n \"\"\"Convert QUrl object to displayed text in combo box\"\"\"\r\n return url.toString()\r\n \r\n def url_changed(self, url):\r\n \"\"\"Displayed URL has changed -> updating URL combo box\"\"\"\r\n self.url_combo.add_text(self.url_to_text(url))\r\n \r\n def icon_changed(self):\r\n self.url_combo.setItemIcon(self.url_combo.currentIndex(),\r\n self.webview.icon())\r\n self.setWindowIcon(self.webview.icon())\r\n\r\n @Slot(bool)\r\n def 
toggle_find_widget(self, state):\r\n if state:\r\n self.find_widget.show()\r\n else:\r\n self.find_widget.hide()\r\n\r\n\r\nclass FrameWebView(QFrame):\r\n \"\"\"\r\n Framed QWebEngineView for UI consistency in Spyder.\r\n \"\"\"\r\n linkClicked = Signal(QUrl)\r\n\r\n def __init__(self, parent):\r\n QFrame.__init__(self, parent)\r\n\r\n self._webview = WebView(self)\r\n\r\n layout = QHBoxLayout()\r\n layout.addWidget(self._webview)\r\n layout.setContentsMargins(0, 0, 0, 0)\r\n self.setLayout(layout)\r\n\r\n self.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)\r\n\r\n if WEBENGINE:\r\n self._webview.page().linkClicked.connect(self.linkClicked)\r\n else:\r\n self._webview.linkClicked.connect(self.linkClicked)\r\n\r\n def __getattr__(self, name):\r\n return getattr(self._webview, name)\r\n\r\n @property\r\n def web_widget(self):\r\n return self._webview\r\n\r\n\r\ndef test():\r\n \"\"\"Run web browser\"\"\"\r\n from spyder.utils.qthelpers import qapplication\r\n app = qapplication(test_time=8)\r\n widget = WebBrowser()\r\n widget.show()\r\n widget.set_home_url('http://www.google.com/')\r\n widget.go_home()\r\n sys.exit(app.exec_())\r\n\r\n\r\nif __name__ == '__main__':\r\n test()\r\n", "path": "spyder/widgets/browser.py"}]}
num_tokens_prompt: 4,086
num_tokens_diff: 204

problem_id: gh_patches_debug_7697
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-800
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Config builders cannot have type annotations in __init__ Trying to build [a config class](https://github.com/raster-foundry/raster-vision-plugin/blob/996044a503d09d311105d07da98b31284b6a6e91/src/rf_raster_vision_plugin/raster_source/config.py) with type annotations, you get: ``` In [7]: RfRasterSourceConfigBuilder(RfRasterSourceConfig).build() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-7-2f92db6db3a6> in <module>() ----> 1 RfRasterSourceConfigBuilder(RfRasterSourceConfig).build() /opt/src/rastervision/core/config.py in build(self) 99 """ 100 self.validate() --> 101 arguments = set(inspect.getargspec(self.config_class).args) 102 keys = set(self.config.keys()) 103 config = {k: self.config[k] for k in (arguments & keys)} /usr/lib/python3.5/inspect.py in getargspec(func) 1043 getfullargspec(func) 1044 if kwonlyargs or ann: -> 1045 raise ValueError("Function has keyword-only arguments or annotations" 1046 ", use getfullargspec() API which can support them") 1047 return ArgSpec(args, varargs, varkw, defaults) ValueError: Function has keyword-only arguments or annotations, use getfullargspec() API which can support them ``` Reproduction ----- - `docker/run` from the linked repo - `ipython` ```python >>> from rf_raster_vision_plugin.raster_source.config import RfRasterSourceConfig, RfRasterSourceConfigBuilder >>> RfRasterSourceConfigBuilder(RfRasterSourceConfig).build() ``` Expected Behavior ----- Config builder shouldn't choke on type annotations </issue> <code> [start of rastervision/core/config.py] 1 from abc import (ABC, abstractmethod) 2 import os 3 import inspect 4 5 from rastervision.utils.files import download_or_copy 6 7 8 class ConfigError(Exception): 9 pass 10 11 12 class Config(ABC): 13 @abstractmethod 14 def to_builder(self): 15 """Return a builder based on this config. 16 """ 17 pass # pragma: no cover 18 19 @abstractmethod 20 def to_proto(self): 21 """Returns the protobuf configuration for this config. 22 """ 23 pass # pragma: no cover 24 25 def update_for_command(self, 26 command_type, 27 experiment_config, 28 context=None, 29 io_def=None): 30 """Updates this configuration for the given command 31 32 Note: While configuration is immutable for client facing operations, 33 this is an internal operation and mutates the configuration. 34 35 Args: 36 command_type: The command type that is currently being 37 preprocessed. experiment_config: The experiment configuration 38 that this configuration is a part of. 39 context: Optional list of parent configurations, to allow for child 40 configurations contained in collections to understand their 41 context in the experiment configuration. 42 43 Returns: 44 Nothing. Call should mutate the configuration object itself. 45 """ 46 pass # pragma: no cover 47 48 @abstractmethod 49 def report_io(self, command_type, io_def): 50 """Updates the given CommandIODefinition. 51 52 So that it includes the inputs, outputs, and missing files for this 53 configuration at this command. 54 55 Args: 56 command_type: The command type that is currently being preprocessed. 57 io_def: The CommandIODefinition that this call should modify. 58 59 Returns: Nothing. This call should make the appropriate calls to the 60 given io_def to mutate its state. 
61 """ 62 pass 63 64 @staticmethod 65 @abstractmethod 66 def builder(): 67 """Returns a new builder that takes this configuration 68 as its starting point. 69 """ 70 pass # pragma: no cover 71 72 @staticmethod 73 @abstractmethod 74 def from_proto(msg): 75 """Creates a Config from the specificed protobuf message 76 TODO: Allow loading from file uri or dict 77 """ 78 pass # pragma: no cover 79 80 81 class ConfigBuilder(ABC): 82 def __init__(self, config_class, config=None): 83 """Construct a builder. 84 85 Args: 86 config_class: The Config class that this builder builds. 87 config: A dictionary of **kwargs that will eventually be passed 88 into the __init__ method of config_class to build the configuration. 89 This config is modified with the fluent builder methods. 90 """ 91 if config is None: # pragma: no cover 92 config = {} 93 94 self.config_class = config_class 95 self.config = config 96 97 def build(self): 98 """Returns the configuration that is built by this builder. 99 """ 100 self.validate() 101 arguments = set(inspect.getargspec(self.config_class).args) 102 keys = set(self.config.keys()) 103 config = {k: self.config[k] for k in (arguments & keys)} 104 return self.config_class(**config) 105 106 def validate(self): 107 """Validate this config, if there is validation on the builder that 108 is not captured by the required arguments of the config. 109 """ 110 pass # pragma: no cover 111 112 @abstractmethod 113 def from_proto(self, msg): 114 """Return a builder that takes the configuration from the proto message 115 as its starting point. 116 """ 117 pass # pragma: no cover 118 119 120 class BundledConfigMixin(ABC): 121 """Mixin for configurations that participate in the bundling of a 122 prediction package""" 123 124 @abstractmethod 125 def save_bundle_files(self, bundle_dir): 126 """Place files into a bundle directory for bundling into 127 a prediction package. 128 129 Returns: A tuple of (config, uris) of the modified configuration 130 with the basenames of URIs in place of the original URIs, 131 and a list of URIs that are to be bundled. 132 """ 133 pass # pragma: no cover 134 135 def bundle_file(self, uri, bundle_dir): 136 local_path = download_or_copy(uri, bundle_dir) 137 base_name = os.path.basename(local_path) 138 return (local_path, base_name) 139 140 @abstractmethod 141 def load_bundle_files(self, bundle_dir): 142 """Load files from a prediction package bundle directory.""" 143 pass # pragma: no cover 144 [end of rastervision/core/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision/core/config.py b/rastervision/core/config.py --- a/rastervision/core/config.py +++ b/rastervision/core/config.py @@ -98,7 +98,7 @@ """Returns the configuration that is built by this builder. """ self.validate() - arguments = set(inspect.getargspec(self.config_class).args) + arguments = set(inspect.getfullargspec(self.config_class).args) keys = set(self.config.keys()) config = {k: self.config[k] for k in (arguments & keys)} return self.config_class(**config)
{"golden_diff": "diff --git a/rastervision/core/config.py b/rastervision/core/config.py\n--- a/rastervision/core/config.py\n+++ b/rastervision/core/config.py\n@@ -98,7 +98,7 @@\n \"\"\"Returns the configuration that is built by this builder.\n \"\"\"\n self.validate()\n- arguments = set(inspect.getargspec(self.config_class).args)\n+ arguments = set(inspect.getfullargspec(self.config_class).args)\n keys = set(self.config.keys())\n config = {k: self.config[k] for k in (arguments & keys)}\n return self.config_class(**config)\n", "issue": "Config builders cannot have type annotations in __init__\nTrying to build [a config class](https://github.com/raster-foundry/raster-vision-plugin/blob/996044a503d09d311105d07da98b31284b6a6e91/src/rf_raster_vision_plugin/raster_source/config.py) with type annotations, you get:\r\n\r\n```\r\nIn [7]: RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-7-2f92db6db3a6> in <module>()\r\n----> 1 RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n\r\n/opt/src/rastervision/core/config.py in build(self)\r\n 99 \"\"\"\r\n 100 self.validate()\r\n--> 101 arguments = set(inspect.getargspec(self.config_class).args)\r\n 102 keys = set(self.config.keys())\r\n 103 config = {k: self.config[k] for k in (arguments & keys)}\r\n\r\n/usr/lib/python3.5/inspect.py in getargspec(func)\r\n 1043 getfullargspec(func)\r\n 1044 if kwonlyargs or ann:\r\n-> 1045 raise ValueError(\"Function has keyword-only arguments or annotations\"\r\n 1046 \", use getfullargspec() API which can support them\")\r\n 1047 return ArgSpec(args, varargs, varkw, defaults)\r\n\r\nValueError: Function has keyword-only arguments or annotations, use getfullargspec() API which can support them\r\n```\r\n\r\nReproduction\r\n-----\r\n\r\n- `docker/run` from the linked repo\r\n- `ipython`\r\n\r\n```python\r\n>>> from rf_raster_vision_plugin.raster_source.config import RfRasterSourceConfig, RfRasterSourceConfigBuilder\r\n>>> RfRasterSourceConfigBuilder(RfRasterSourceConfig).build()\r\n```\r\n\r\nExpected Behavior\r\n-----\r\n\r\nConfig builder shouldn't choke on type annotations\n", "before_files": [{"content": "from abc import (ABC, abstractmethod)\nimport os\nimport inspect\n\nfrom rastervision.utils.files import download_or_copy\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(ABC):\n @abstractmethod\n def to_builder(self):\n \"\"\"Return a builder based on this config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def to_proto(self):\n \"\"\"Returns the protobuf configuration for this config.\n \"\"\"\n pass # pragma: no cover\n\n def update_for_command(self,\n command_type,\n experiment_config,\n context=None,\n io_def=None):\n \"\"\"Updates this configuration for the given command\n\n Note: While configuration is immutable for client facing operations,\n this is an internal operation and mutates the configuration.\n\n Args:\n command_type: The command type that is currently being\n preprocessed. experiment_config: The experiment configuration\n that this configuration is a part of.\n context: Optional list of parent configurations, to allow for child\n configurations contained in collections to understand their\n context in the experiment configuration.\n\n Returns:\n Nothing. 
Call should mutate the configuration object itself.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def report_io(self, command_type, io_def):\n \"\"\"Updates the given CommandIODefinition.\n\n So that it includes the inputs, outputs, and missing files for this\n configuration at this command.\n\n Args:\n command_type: The command type that is currently being preprocessed.\n io_def: The CommandIODefinition that this call should modify.\n\n Returns: Nothing. This call should make the appropriate calls to the\n given io_def to mutate its state.\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def builder():\n \"\"\"Returns a new builder that takes this configuration\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n @staticmethod\n @abstractmethod\n def from_proto(msg):\n \"\"\"Creates a Config from the specificed protobuf message\n TODO: Allow loading from file uri or dict\n \"\"\"\n pass # pragma: no cover\n\n\nclass ConfigBuilder(ABC):\n def __init__(self, config_class, config=None):\n \"\"\"Construct a builder.\n\n Args:\n config_class: The Config class that this builder builds.\n config: A dictionary of **kwargs that will eventually be passed\n into the __init__ method of config_class to build the configuration.\n This config is modified with the fluent builder methods.\n \"\"\"\n if config is None: # pragma: no cover\n config = {}\n\n self.config_class = config_class\n self.config = config\n\n def build(self):\n \"\"\"Returns the configuration that is built by this builder.\n \"\"\"\n self.validate()\n arguments = set(inspect.getargspec(self.config_class).args)\n keys = set(self.config.keys())\n config = {k: self.config[k] for k in (arguments & keys)}\n return self.config_class(**config)\n\n def validate(self):\n \"\"\"Validate this config, if there is validation on the builder that\n is not captured by the required arguments of the config.\n \"\"\"\n pass # pragma: no cover\n\n @abstractmethod\n def from_proto(self, msg):\n \"\"\"Return a builder that takes the configuration from the proto message\n as its starting point.\n \"\"\"\n pass # pragma: no cover\n\n\nclass BundledConfigMixin(ABC):\n \"\"\"Mixin for configurations that participate in the bundling of a\n prediction package\"\"\"\n\n @abstractmethod\n def save_bundle_files(self, bundle_dir):\n \"\"\"Place files into a bundle directory for bundling into\n a prediction package.\n\n Returns: A tuple of (config, uris) of the modified configuration\n with the basenames of URIs in place of the original URIs,\n and a list of URIs that are to be bundled.\n \"\"\"\n pass # pragma: no cover\n\n def bundle_file(self, uri, bundle_dir):\n local_path = download_or_copy(uri, bundle_dir)\n base_name = os.path.basename(local_path)\n return (local_path, base_name)\n\n @abstractmethod\n def load_bundle_files(self, bundle_dir):\n \"\"\"Load files from a prediction package bundle directory.\"\"\"\n pass # pragma: no cover\n", "path": "rastervision/core/config.py"}]}
2,259
137
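The record above turns on a one-line API swap: on the Python 3.5 interpreter shown in the traceback, `inspect.getargspec` raises `ValueError` for callables whose signatures carry annotations or keyword-only arguments, while `inspect.getfullargspec` accepts them. The sketch below illustrates that difference; `AnnotatedConfig` is a hypothetical stand-in for the plugin's config class, not code from the repository.

```python
import inspect

class AnnotatedConfig:
    # Annotated __init__, the shape of signature that tripped getargspec.
    def __init__(self, uri: str, channel_order: list = None):
        self.uri = uri
        self.channel_order = channel_order

# getfullargspec handles annotated signatures that getargspec rejected.
argspec = inspect.getfullargspec(AnnotatedConfig.__init__)
print(argspec.args)  # ['self', 'uri', 'channel_order']
```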
gh_patches_debug_15460
rasdani/github-patches
git_diff
streamlink__streamlink-405
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adult Swim doesn't play any streams. https://www.adultswim.com/videos/streams https://www.adultswim.com/videos/streams/toonami https://www.adultswim.com/videos/streams/williams-stream </issue> <code> [start of src/streamlink/stream/hls.py] 1 from collections import defaultdict, namedtuple 2 3 try: 4 from Crypto.Cipher import AES 5 import struct 6 7 def num_to_iv(n): 8 return struct.pack(">8xq", n) 9 10 CAN_DECRYPT = True 11 except ImportError: 12 CAN_DECRYPT = False 13 14 from . import hls_playlist 15 from .http import HTTPStream 16 from .segmented import (SegmentedStreamReader, 17 SegmentedStreamWriter, 18 SegmentedStreamWorker) 19 from ..exceptions import StreamError 20 21 22 Sequence = namedtuple("Sequence", "num segment") 23 24 25 class HLSStreamWriter(SegmentedStreamWriter): 26 def __init__(self, reader, *args, **kwargs): 27 options = reader.stream.session.options 28 kwargs["retries"] = options.get("hls-segment-attempts") 29 kwargs["threads"] = options.get("hls-segment-threads") 30 kwargs["timeout"] = options.get("hls-segment-timeout") 31 SegmentedStreamWriter.__init__(self, reader, *args, **kwargs) 32 33 self.byterange_offsets = defaultdict(int) 34 self.key_data = None 35 self.key_uri = None 36 37 def create_decryptor(self, key, sequence): 38 if key.method != "AES-128": 39 raise StreamError("Unable to decrypt cipher {0}", key.method) 40 41 if not key.uri: 42 raise StreamError("Missing URI to decryption key") 43 44 if self.key_uri != key.uri: 45 res = self.session.http.get(key.uri, exception=StreamError, 46 **self.reader.request_params) 47 self.key_data = res.content 48 self.key_uri = key.uri 49 50 iv = key.iv or num_to_iv(sequence) 51 52 # Pad IV if needed 53 iv = b"\x00" * (16 - len(iv)) + iv 54 55 return AES.new(self.key_data, AES.MODE_CBC, iv) 56 57 def create_request_params(self, sequence): 58 request_params = dict(self.reader.request_params) 59 headers = request_params.pop("headers", {}) 60 61 if sequence.segment.byterange: 62 bytes_start = self.byterange_offsets[sequence.segment.uri] 63 if sequence.segment.byterange.offset is not None: 64 bytes_start = sequence.segment.byterange.offset 65 66 bytes_len = max(sequence.segment.byterange.range - 1, 0) 67 bytes_end = bytes_start + bytes_len 68 headers["Range"] = "bytes={0}-{1}".format(bytes_start, bytes_end) 69 self.byterange_offsets[sequence.segment.uri] = bytes_end + 1 70 71 request_params["headers"] = headers 72 73 return request_params 74 75 def fetch(self, sequence, retries=None): 76 if self.closed or not retries: 77 return 78 79 try: 80 request_params = self.create_request_params(sequence) 81 return self.session.http.get(sequence.segment.uri, 82 stream=True, 83 timeout=self.timeout, 84 exception=StreamError, 85 **request_params) 86 except StreamError as err: 87 self.logger.error("Failed to open segment {0}: {1}", sequence.num, err) 88 return self.fetch(sequence, retries - 1) 89 90 def write(self, sequence, res, chunk_size=8192, retries=None): 91 retries = retries or self.retries 92 if retries == 0: 93 self.logger.error("Failed to open segment {0}", sequence.num) 94 return 95 try: 96 if sequence.segment.key and sequence.segment.key.method != "NONE": 97 try: 98 decryptor = self.create_decryptor(sequence.segment.key, 99 sequence.num) 100 except StreamError as err: 101 self.logger.error("Failed to create decryptor: {0}", err) 102 self.close() 103 return 104 105 for chunk in res.iter_content(chunk_size): 106 # If the input 
data is not a multiple of 16, cut off any garbage 107 garbage_len = len(chunk) % 16 108 if garbage_len: 109 self.logger.debug("Cutting off {0} bytes of garbage " 110 "before decrypting", garbage_len) 111 decrypted_chunk = decryptor.decrypt(chunk[:-garbage_len]) 112 else: 113 decrypted_chunk = decryptor.decrypt(chunk) 114 self.reader.buffer.write(decrypted_chunk) 115 else: 116 for chunk in res.iter_content(chunk_size): 117 self.reader.buffer.write(chunk) 118 except StreamError as err: 119 self.logger.error("Failed to open segment {0}: {1}", sequence.num, err) 120 return self.write(sequence, 121 self.fetch(sequence, retries=self.retries), 122 chunk_size=chunk_size, 123 retries=retries - 1) 124 125 self.logger.debug("Download of segment {0} complete", sequence.num) 126 127 128 class HLSStreamWorker(SegmentedStreamWorker): 129 def __init__(self, *args, **kwargs): 130 SegmentedStreamWorker.__init__(self, *args, **kwargs) 131 132 self.playlist_changed = False 133 self.playlist_end = None 134 self.playlist_sequence = -1 135 self.playlist_sequences = [] 136 self.playlist_reload_time = 15 137 self.live_edge = self.session.options.get("hls-live-edge") 138 139 self.reload_playlist() 140 141 def reload_playlist(self): 142 if self.closed: 143 return 144 145 self.reader.buffer.wait_free() 146 self.logger.debug("Reloading playlist") 147 res = self.session.http.get(self.stream.url, 148 exception=StreamError, 149 **self.reader.request_params) 150 151 try: 152 playlist = hls_playlist.load(res.text, res.url) 153 except ValueError as err: 154 raise StreamError(err) 155 156 if playlist.is_master: 157 raise StreamError("Attempted to play a variant playlist, use " 158 "'hlsvariant://{0}' instead".format(self.stream.url)) 159 160 if playlist.iframes_only: 161 raise StreamError("Streams containing I-frames only is not playable") 162 163 media_sequence = playlist.media_sequence or 0 164 sequences = [Sequence(media_sequence + i, s) 165 for i, s in enumerate(playlist.segments)] 166 167 if sequences: 168 self.process_sequences(playlist, sequences) 169 170 def process_sequences(self, playlist, sequences): 171 first_sequence, last_sequence = sequences[0], sequences[-1] 172 173 if first_sequence.segment.key and first_sequence.segment.key.method != "NONE": 174 self.logger.debug("Segments in this playlist are encrypted") 175 176 if not CAN_DECRYPT: 177 raise StreamError("Need pyCrypto or pycryptodome installed to decrypt this stream") 178 179 self.playlist_changed = ([s.num for s in self.playlist_sequences] != 180 [s.num for s in sequences]) 181 self.playlist_reload_time = (playlist.target_duration or 182 last_sequence.segment.duration) 183 self.playlist_sequences = sequences 184 185 if not self.playlist_changed: 186 self.playlist_reload_time = max(self.playlist_reload_time / 2, 1) 187 188 if playlist.is_endlist: 189 self.playlist_end = last_sequence.num 190 191 if self.playlist_sequence < 0: 192 if self.playlist_end is None: 193 edge_index = -(min(len(sequences), max(int(self.live_edge), 1))) 194 edge_sequence = sequences[edge_index] 195 self.playlist_sequence = edge_sequence.num 196 else: 197 self.playlist_sequence = first_sequence.num 198 199 def valid_sequence(self, sequence): 200 return sequence.num >= self.playlist_sequence 201 202 def iter_segments(self): 203 while not self.closed: 204 for sequence in filter(self.valid_sequence, self.playlist_sequences): 205 self.logger.debug("Adding segment {0} to queue", sequence.num) 206 yield sequence 207 208 # End of stream 209 stream_end = self.playlist_end and sequence.num >= 
self.playlist_end 210 if self.closed or stream_end: 211 return 212 213 self.playlist_sequence = sequence.num + 1 214 215 if self.wait(self.playlist_reload_time): 216 try: 217 self.reload_playlist() 218 except StreamError as err: 219 self.logger.warning("Failed to reload playlist: {0}", err) 220 221 222 class HLSStreamReader(SegmentedStreamReader): 223 __worker__ = HLSStreamWorker 224 __writer__ = HLSStreamWriter 225 226 def __init__(self, stream, *args, **kwargs): 227 SegmentedStreamReader.__init__(self, stream, *args, **kwargs) 228 self.logger = stream.session.logger.new_module("stream.hls") 229 self.request_params = dict(stream.args) 230 self.timeout = stream.session.options.get("hls-timeout") 231 232 # These params are reserved for internal use 233 self.request_params.pop("exception", None) 234 self.request_params.pop("stream", None) 235 self.request_params.pop("timeout", None) 236 self.request_params.pop("url", None) 237 238 239 class HLSStream(HTTPStream): 240 """Implementation of the Apple HTTP Live Streaming protocol 241 242 *Attributes:* 243 244 - :attr:`url` The URL to the HLS playlist. 245 - :attr:`args` A :class:`dict` containing keyword arguments passed 246 to :meth:`requests.request`, such as headers and cookies. 247 248 .. versionchanged:: 1.7.0 249 Added *args* attribute. 250 251 """ 252 253 __shortname__ = "hls" 254 255 def __init__(self, session_, url, **args): 256 HTTPStream.__init__(self, session_, url, **args) 257 258 def __repr__(self): 259 return "<HLSStream({0!r})>".format(self.url) 260 261 def __json__(self): 262 json = HTTPStream.__json__(self) 263 264 # Pretty sure HLS is GET only. 265 del json["method"] 266 del json["body"] 267 268 return json 269 270 def open(self): 271 reader = HLSStreamReader(self) 272 reader.open() 273 274 return reader 275 276 @classmethod 277 def parse_variant_playlist(cls, session_, url, name_key="name", 278 name_prefix="", check_streams=False, 279 **request_params): 280 """Attempts to parse a variant playlist and return its streams. 281 282 :param url: The URL of the variant playlist. 283 :param name_key: Prefer to use this key as stream name, valid keys are: 284 name, pixels, bitrate. 285 :param name_prefix: Add this prefix to the stream names. 286 :param check_streams: Only allow streams that are accesible. 287 """ 288 289 # Backwards compatibility with "namekey" and "nameprefix" params. 
290 name_key = request_params.pop("namekey", name_key) 291 name_prefix = request_params.pop("nameprefix", name_prefix) 292 293 res = session_.http.get(url, exception=IOError, **request_params) 294 295 try: 296 parser = hls_playlist.load(res.text, base_uri=res.url) 297 except ValueError as err: 298 raise IOError("Failed to parse playlist: {0}".format(err)) 299 300 streams = {} 301 for playlist in filter(lambda p: not p.is_iframe, parser.playlists): 302 names = dict(name=None, pixels=None, bitrate=None) 303 304 for media in playlist.media: 305 if media.type == "VIDEO" and media.name: 306 names["name"] = media.name 307 308 if playlist.stream_info.resolution: 309 width, height = playlist.stream_info.resolution 310 names["pixels"] = "{0}p".format(height) 311 312 if playlist.stream_info.bandwidth: 313 bw = playlist.stream_info.bandwidth 314 315 if bw >= 1000: 316 names["bitrate"] = "{0}k".format(int(bw / 1000.0)) 317 else: 318 names["bitrate"] = "{0}k".format(bw / 1000.0) 319 320 stream_name = (names.get(name_key) or names.get("name") or 321 names.get("pixels") or names.get("bitrate")) 322 323 if not stream_name or stream_name in streams: 324 continue 325 326 if check_streams: 327 try: 328 session_.http.get(playlist.uri, **request_params) 329 except Exception: 330 continue 331 332 stream = HLSStream(session_, playlist.uri, **request_params) 333 streams[name_prefix + stream_name] = stream 334 335 return streams 336 [end of src/streamlink/stream/hls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/stream/hls.py b/src/streamlink/stream/hls.py --- a/src/streamlink/stream/hls.py +++ b/src/streamlink/stream/hls.py @@ -320,8 +320,17 @@ stream_name = (names.get(name_key) or names.get("name") or names.get("pixels") or names.get("bitrate")) - if not stream_name or stream_name in streams: + if not stream_name: continue + if stream_name in streams: # rename duplicate streams + stream_name = "{0}_alt".format(stream_name) + num_alts = len(list(filter(lambda n: n.startswith(stream_name), streams.keys()))) + + # We shouldn't need more than 2 alt streams + if num_alts >= 2: + continue + elif num_alts > 0: + stream_name = "{0}{1}".format(stream_name, num_alts + 1) if check_streams: try:
{"golden_diff": "diff --git a/src/streamlink/stream/hls.py b/src/streamlink/stream/hls.py\n--- a/src/streamlink/stream/hls.py\n+++ b/src/streamlink/stream/hls.py\n@@ -320,8 +320,17 @@\n stream_name = (names.get(name_key) or names.get(\"name\") or\n names.get(\"pixels\") or names.get(\"bitrate\"))\n \n- if not stream_name or stream_name in streams:\n+ if not stream_name:\n continue\n+ if stream_name in streams: # rename duplicate streams\n+ stream_name = \"{0}_alt\".format(stream_name)\n+ num_alts = len(list(filter(lambda n: n.startswith(stream_name), streams.keys())))\n+\n+ # We shouldn't need more than 2 alt streams\n+ if num_alts >= 2:\n+ continue\n+ elif num_alts > 0:\n+ stream_name = \"{0}{1}\".format(stream_name, num_alts + 1)\n \n if check_streams:\n try:\n", "issue": "Adult Swim doesn't play any streams.\nhttps://www.adultswim.com/videos/streams\r\nhttps://www.adultswim.com/videos/streams/toonami\r\nhttps://www.adultswim.com/videos/streams/williams-stream\n", "before_files": [{"content": "from collections import defaultdict, namedtuple\n\ntry:\n from Crypto.Cipher import AES\n import struct\n\n def num_to_iv(n):\n return struct.pack(\">8xq\", n)\n\n CAN_DECRYPT = True\nexcept ImportError:\n CAN_DECRYPT = False\n\nfrom . import hls_playlist\nfrom .http import HTTPStream\nfrom .segmented import (SegmentedStreamReader,\n SegmentedStreamWriter,\n SegmentedStreamWorker)\nfrom ..exceptions import StreamError\n\n\nSequence = namedtuple(\"Sequence\", \"num segment\")\n\n\nclass HLSStreamWriter(SegmentedStreamWriter):\n def __init__(self, reader, *args, **kwargs):\n options = reader.stream.session.options\n kwargs[\"retries\"] = options.get(\"hls-segment-attempts\")\n kwargs[\"threads\"] = options.get(\"hls-segment-threads\")\n kwargs[\"timeout\"] = options.get(\"hls-segment-timeout\")\n SegmentedStreamWriter.__init__(self, reader, *args, **kwargs)\n\n self.byterange_offsets = defaultdict(int)\n self.key_data = None\n self.key_uri = None\n\n def create_decryptor(self, key, sequence):\n if key.method != \"AES-128\":\n raise StreamError(\"Unable to decrypt cipher {0}\", key.method)\n\n if not key.uri:\n raise StreamError(\"Missing URI to decryption key\")\n\n if self.key_uri != key.uri:\n res = self.session.http.get(key.uri, exception=StreamError,\n **self.reader.request_params)\n self.key_data = res.content\n self.key_uri = key.uri\n\n iv = key.iv or num_to_iv(sequence)\n\n # Pad IV if needed\n iv = b\"\\x00\" * (16 - len(iv)) + iv\n\n return AES.new(self.key_data, AES.MODE_CBC, iv)\n\n def create_request_params(self, sequence):\n request_params = dict(self.reader.request_params)\n headers = request_params.pop(\"headers\", {})\n\n if sequence.segment.byterange:\n bytes_start = self.byterange_offsets[sequence.segment.uri]\n if sequence.segment.byterange.offset is not None:\n bytes_start = sequence.segment.byterange.offset\n\n bytes_len = max(sequence.segment.byterange.range - 1, 0)\n bytes_end = bytes_start + bytes_len\n headers[\"Range\"] = \"bytes={0}-{1}\".format(bytes_start, bytes_end)\n self.byterange_offsets[sequence.segment.uri] = bytes_end + 1\n\n request_params[\"headers\"] = headers\n\n return request_params\n\n def fetch(self, sequence, retries=None):\n if self.closed or not retries:\n return\n\n try:\n request_params = self.create_request_params(sequence)\n return self.session.http.get(sequence.segment.uri,\n stream=True,\n timeout=self.timeout,\n exception=StreamError,\n **request_params)\n except StreamError as err:\n self.logger.error(\"Failed to open segment {0}: {1}\", 
sequence.num, err)\n return self.fetch(sequence, retries - 1)\n\n def write(self, sequence, res, chunk_size=8192, retries=None):\n retries = retries or self.retries\n if retries == 0:\n self.logger.error(\"Failed to open segment {0}\", sequence.num)\n return\n try:\n if sequence.segment.key and sequence.segment.key.method != \"NONE\":\n try:\n decryptor = self.create_decryptor(sequence.segment.key,\n sequence.num)\n except StreamError as err:\n self.logger.error(\"Failed to create decryptor: {0}\", err)\n self.close()\n return\n\n for chunk in res.iter_content(chunk_size):\n # If the input data is not a multiple of 16, cut off any garbage\n garbage_len = len(chunk) % 16\n if garbage_len:\n self.logger.debug(\"Cutting off {0} bytes of garbage \"\n \"before decrypting\", garbage_len)\n decrypted_chunk = decryptor.decrypt(chunk[:-garbage_len])\n else:\n decrypted_chunk = decryptor.decrypt(chunk)\n self.reader.buffer.write(decrypted_chunk)\n else:\n for chunk in res.iter_content(chunk_size):\n self.reader.buffer.write(chunk)\n except StreamError as err:\n self.logger.error(\"Failed to open segment {0}: {1}\", sequence.num, err)\n return self.write(sequence,\n self.fetch(sequence, retries=self.retries),\n chunk_size=chunk_size,\n retries=retries - 1)\n\n self.logger.debug(\"Download of segment {0} complete\", sequence.num)\n\n\nclass HLSStreamWorker(SegmentedStreamWorker):\n def __init__(self, *args, **kwargs):\n SegmentedStreamWorker.__init__(self, *args, **kwargs)\n\n self.playlist_changed = False\n self.playlist_end = None\n self.playlist_sequence = -1\n self.playlist_sequences = []\n self.playlist_reload_time = 15\n self.live_edge = self.session.options.get(\"hls-live-edge\")\n\n self.reload_playlist()\n\n def reload_playlist(self):\n if self.closed:\n return\n\n self.reader.buffer.wait_free()\n self.logger.debug(\"Reloading playlist\")\n res = self.session.http.get(self.stream.url,\n exception=StreamError,\n **self.reader.request_params)\n\n try:\n playlist = hls_playlist.load(res.text, res.url)\n except ValueError as err:\n raise StreamError(err)\n\n if playlist.is_master:\n raise StreamError(\"Attempted to play a variant playlist, use \"\n \"'hlsvariant://{0}' instead\".format(self.stream.url))\n\n if playlist.iframes_only:\n raise StreamError(\"Streams containing I-frames only is not playable\")\n\n media_sequence = playlist.media_sequence or 0\n sequences = [Sequence(media_sequence + i, s)\n for i, s in enumerate(playlist.segments)]\n\n if sequences:\n self.process_sequences(playlist, sequences)\n\n def process_sequences(self, playlist, sequences):\n first_sequence, last_sequence = sequences[0], sequences[-1]\n\n if first_sequence.segment.key and first_sequence.segment.key.method != \"NONE\":\n self.logger.debug(\"Segments in this playlist are encrypted\")\n\n if not CAN_DECRYPT:\n raise StreamError(\"Need pyCrypto or pycryptodome installed to decrypt this stream\")\n\n self.playlist_changed = ([s.num for s in self.playlist_sequences] !=\n [s.num for s in sequences])\n self.playlist_reload_time = (playlist.target_duration or\n last_sequence.segment.duration)\n self.playlist_sequences = sequences\n\n if not self.playlist_changed:\n self.playlist_reload_time = max(self.playlist_reload_time / 2, 1)\n\n if playlist.is_endlist:\n self.playlist_end = last_sequence.num\n\n if self.playlist_sequence < 0:\n if self.playlist_end is None:\n edge_index = -(min(len(sequences), max(int(self.live_edge), 1)))\n edge_sequence = sequences[edge_index]\n self.playlist_sequence = edge_sequence.num\n 
else:\n self.playlist_sequence = first_sequence.num\n\n def valid_sequence(self, sequence):\n return sequence.num >= self.playlist_sequence\n\n def iter_segments(self):\n while not self.closed:\n for sequence in filter(self.valid_sequence, self.playlist_sequences):\n self.logger.debug(\"Adding segment {0} to queue\", sequence.num)\n yield sequence\n\n # End of stream\n stream_end = self.playlist_end and sequence.num >= self.playlist_end\n if self.closed or stream_end:\n return\n\n self.playlist_sequence = sequence.num + 1\n\n if self.wait(self.playlist_reload_time):\n try:\n self.reload_playlist()\n except StreamError as err:\n self.logger.warning(\"Failed to reload playlist: {0}\", err)\n\n\nclass HLSStreamReader(SegmentedStreamReader):\n __worker__ = HLSStreamWorker\n __writer__ = HLSStreamWriter\n\n def __init__(self, stream, *args, **kwargs):\n SegmentedStreamReader.__init__(self, stream, *args, **kwargs)\n self.logger = stream.session.logger.new_module(\"stream.hls\")\n self.request_params = dict(stream.args)\n self.timeout = stream.session.options.get(\"hls-timeout\")\n\n # These params are reserved for internal use\n self.request_params.pop(\"exception\", None)\n self.request_params.pop(\"stream\", None)\n self.request_params.pop(\"timeout\", None)\n self.request_params.pop(\"url\", None)\n\n\nclass HLSStream(HTTPStream):\n \"\"\"Implementation of the Apple HTTP Live Streaming protocol\n\n *Attributes:*\n\n - :attr:`url` The URL to the HLS playlist.\n - :attr:`args` A :class:`dict` containing keyword arguments passed\n to :meth:`requests.request`, such as headers and cookies.\n\n .. versionchanged:: 1.7.0\n Added *args* attribute.\n\n \"\"\"\n\n __shortname__ = \"hls\"\n\n def __init__(self, session_, url, **args):\n HTTPStream.__init__(self, session_, url, **args)\n\n def __repr__(self):\n return \"<HLSStream({0!r})>\".format(self.url)\n\n def __json__(self):\n json = HTTPStream.__json__(self)\n\n # Pretty sure HLS is GET only.\n del json[\"method\"]\n del json[\"body\"]\n\n return json\n\n def open(self):\n reader = HLSStreamReader(self)\n reader.open()\n\n return reader\n\n @classmethod\n def parse_variant_playlist(cls, session_, url, name_key=\"name\",\n name_prefix=\"\", check_streams=False,\n **request_params):\n \"\"\"Attempts to parse a variant playlist and return its streams.\n\n :param url: The URL of the variant playlist.\n :param name_key: Prefer to use this key as stream name, valid keys are:\n name, pixels, bitrate.\n :param name_prefix: Add this prefix to the stream names.\n :param check_streams: Only allow streams that are accesible.\n \"\"\"\n\n # Backwards compatibility with \"namekey\" and \"nameprefix\" params.\n name_key = request_params.pop(\"namekey\", name_key)\n name_prefix = request_params.pop(\"nameprefix\", name_prefix)\n\n res = session_.http.get(url, exception=IOError, **request_params)\n\n try:\n parser = hls_playlist.load(res.text, base_uri=res.url)\n except ValueError as err:\n raise IOError(\"Failed to parse playlist: {0}\".format(err))\n\n streams = {}\n for playlist in filter(lambda p: not p.is_iframe, parser.playlists):\n names = dict(name=None, pixels=None, bitrate=None)\n\n for media in playlist.media:\n if media.type == \"VIDEO\" and media.name:\n names[\"name\"] = media.name\n\n if playlist.stream_info.resolution:\n width, height = playlist.stream_info.resolution\n names[\"pixels\"] = \"{0}p\".format(height)\n\n if playlist.stream_info.bandwidth:\n bw = playlist.stream_info.bandwidth\n\n if bw >= 1000:\n names[\"bitrate\"] = 
\"{0}k\".format(int(bw / 1000.0))\n else:\n names[\"bitrate\"] = \"{0}k\".format(bw / 1000.0)\n\n stream_name = (names.get(name_key) or names.get(\"name\") or\n names.get(\"pixels\") or names.get(\"bitrate\"))\n\n if not stream_name or stream_name in streams:\n continue\n\n if check_streams:\n try:\n session_.http.get(playlist.uri, **request_params)\n except Exception:\n continue\n\n stream = HLSStream(session_, playlist.uri, **request_params)\n streams[name_prefix + stream_name] = stream\n\n return streams\n", "path": "src/streamlink/stream/hls.py"}]}
4,042
226
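A note on the streamlink record just above: the accepted diff resolves name collisions among variant-playlist streams by renaming duplicates instead of silently dropping them. The helper below restates that renaming logic in isolation; the function name `dedupe_stream_name` is invented here for illustration and does not exist in the repository.

```python
def dedupe_stream_name(stream_name, streams):
    """Mirror of the patch's duplicate handling: first clash becomes
    <name>_alt, second becomes <name>_alt2, anything beyond is skipped."""
    if stream_name not in streams:
        return stream_name
    stream_name = "{0}_alt".format(stream_name)
    num_alts = len([n for n in streams if n.startswith(stream_name)])
    if num_alts >= 2:
        return None  # caller skips the stream, as the patch does with `continue`
    if num_alts > 0:
        stream_name = "{0}{1}".format(stream_name, num_alts + 1)
    return stream_name

# Example: two streams already named "720p" and "720p_alt".
print(dedupe_stream_name("720p", {"720p": object(), "720p_alt": object()}))  # 720p_alt2
```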
gh_patches_debug_40234
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-344
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Simplify user API: don't rely on column name to find labels. </issue> <code> [start of edl_k8s_examples/mnist_model.py] 1 import tensorflow as tf 2 import numpy as np 3 4 5 class MnistModel(tf.keras.Model): 6 def __init__(self, channel_last=True): 7 super(MnistModel, self).__init__(name='mnist_model') 8 if channel_last: 9 self._reshape = tf.keras.layers.Reshape((28, 28, 1)) 10 else: 11 self._reshape = tf.keras.layers.Reshape((1, 28, 28)) 12 self._conv1 = tf.keras.layers.Conv2D( 13 32, kernel_size=(3, 3), activation='relu') 14 self._conv2 = tf.keras.layers.Conv2D( 15 64, kernel_size=(3, 3), activation='relu') 16 self._batch_norm = tf.keras.layers.BatchNormalization() 17 self._maxpooling = tf.keras.layers.MaxPooling2D( 18 pool_size=(2, 2)) 19 self._dropout = tf.keras.layers.Dropout(0.25) 20 self._flatten = tf.keras.layers.Flatten() 21 self._dense = tf.keras.layers.Dense(10) 22 23 def call(self, inputs, training=False): 24 x = self._reshape(inputs) 25 x = self._conv1(x) 26 x = self._conv2(x) 27 x = self._batch_norm(x, training=training) 28 x = self._maxpooling(x) 29 if training: 30 x = self._dropout(x, training=training) 31 x = self._flatten(x) 32 x = self._dense(x) 33 return x 34 35 36 model = MnistModel() 37 model.build(input_shape=(1, 28, 28)) 38 39 input_names = ['image'] 40 41 def loss(output, labels): 42 return tf.reduce_mean( 43 tf.nn.sparse_softmax_cross_entropy_with_logits( 44 logits=output, labels=labels['label'])) 45 46 def optimizer(lr=0.1): 47 return tf.train.GradientDescentOptimizer(lr) 48 49 def input_fn(records): 50 image_list = [] 51 label_list = [] 52 # deserialize 53 for r in records: 54 parsed = np.frombuffer(r, dtype="uint8") 55 label = parsed[-1] 56 image = np.resize(parsed[:-1], new_shape=(28, 28)) 57 image = image.astype(np.float32) 58 image /= 255 59 label = label.astype(np.int32) 60 image_list.append(image) 61 label_list.append(label) 62 63 # batching 64 batch_size = len(image_list) 65 images = np.concatenate(image_list, axis=0) 66 images = np.reshape(images, (batch_size, 28, 28)) 67 labels = np.array(label_list) 68 return {'image': images, 'label': labels} 69 [end of edl_k8s_examples/mnist_model.py] [start of elasticdl/worker/worker.py] 1 import tensorflow as tf 2 assert tf.executing_eagerly() 3 4 from google.protobuf import empty_pb2 5 from tensorflow.python.ops import math_ops 6 from elasticdl.proto import master_pb2_grpc 7 from elasticdl.proto import master_pb2 8 from elasticdl.common.ndarray import ndarray_to_tensor, tensor_to_ndarray 9 from elasticdl.common.model_helper import load_user_model 10 import itertools 11 import recordio 12 13 # the default max number of a minibatch retrain as its gradients are not accepted by master. 
14 DEFAULT_MAX_MINIBATCH_RETRAIN_NUM = 64 15 16 class Worker(object): 17 """ElasticDL worker""" 18 19 def __init__(self, 20 model_file, 21 channel=None, 22 max_retrain_num=DEFAULT_MAX_MINIBATCH_RETRAIN_NUM): 23 """ 24 Arguments: 25 model_module: A module to define the model 26 channel: grpc channel 27 max_retrain_num: max number of a minibatch retrain as its gradients are not accepted by master 28 """ 29 30 model_module = load_user_model(model_file) 31 self._model = model_module.model 32 self._input_fn = model_module.input_fn 33 self._opt_fn = model_module.optimizer 34 self._loss = model_module.loss 35 self._input_names = model_module.input_names 36 37 if channel is None: 38 self._stub = None 39 else: 40 self._stub = master_pb2_grpc.MasterStub(channel) 41 self._max_retrain_num = max_retrain_num 42 self._model_version = -1 43 44 def get_task(self): 45 """ 46 get task from master 47 """ 48 return self._stub.GetTask(empty_pb2.Empty()) 49 50 def get_model(self, min_version): 51 """ 52 get model from master, and update model_version 53 """ 54 req = master_pb2.GetModelRequest() 55 req.min_version = min_version 56 model = self._stub.GetModel(req) 57 58 for var in self._model.trainable_variables: 59 # Assumes all trainable variables exist in model.param. 60 var.assign( 61 tensor_to_ndarray(model.param[var.name])) 62 self._model_version = model.version 63 64 def report_task_result(self, task_id, err_msg): 65 """ 66 report task result to master 67 """ 68 report = master_pb2.ReportTaskResultRequest() 69 report.task_id = task_id 70 report.err_message = err_msg 71 return self._stub.ReportTaskResult(report) 72 73 def report_gradient(self, grads): 74 """ 75 report gradient to ps, return (accepted, model_version) from rpc call. 76 """ 77 req = master_pb2.ReportGradientRequest() 78 for g, v in zip(grads, self._model.trainable_variables): 79 req.gradient[v.name].CopyFrom( 80 ndarray_to_tensor(g.numpy())) 81 req.model_version = self._model_version 82 res = self._stub.ReportGradient(req) 83 return res.accepted, res.model_version 84 85 def distributed_train(self): 86 """ 87 Distributed training. 88 """ 89 while True: 90 task = self.get_task() 91 if not task.shard_file_name: 92 # No more task 93 break 94 batch_size = task.minibatch_size 95 err_msg = "" 96 try: 97 with recordio.File(task.shard_file_name, "r") as rdio_r: 98 reader = rdio_r.get_reader(task.start, task.end) 99 min_model_version = task.model_version 100 while True: 101 record_buf = list( 102 itertools.islice(reader, 0, batch_size)) 103 if not record_buf: 104 break 105 106 for _ in range(self._max_retrain_num): 107 # TODO: optimize the logic to avoid unnecessary get_model call. 108 self.get_model( 109 max(self._model_version, min_model_version)) 110 111 batch_input_data = self._input_fn(record_buf) 112 113 with tf.GradientTape() as tape: 114 inputs = [] 115 for input_name in self._input_names: 116 inputs.append(batch_input_data[input_name]) 117 if len(inputs) == 1: 118 inputs = inputs[0] 119 outputs = self._model.call(inputs, training=True) 120 loss = self._loss(outputs, batch_input_data) 121 122 # TODO: Add regularization loss if any, 123 # which should be divided by the number of contributing workers. 124 grads = tape.gradient( 125 loss, self._model.trainable_variables) 126 print("Loss is ", loss.numpy()) 127 128 accepted, min_model_version = self.report_gradient( 129 grads) 130 if accepted: 131 break 132 else: 133 # Worker got stuck, fail the task. 134 # TODO: stop the worker if it fails to make any progress for some time. 
135 raise RuntimeError("Worker got stuck") 136 137 138 except Exception as ex: 139 err_msg = str(ex) 140 self.report_task_result(task.task_id, err_msg) 141 142 def local_train(self, file_list, batch_size, epoch=1, kwargs=None): 143 """ 144 Local training for local testing. Must in eager mode. 145 Argments: 146 batch_size: batch size in training 147 epoch: the number of epoch in training 148 kwargs: contains a dict of parameters used in training 149 """ 150 optimizer = self._opt_fn() 151 for _ in range(epoch): 152 for f in file_list: 153 with recordio.File(f, "r") as rdio_r: 154 reader = rdio_r.get_reader(0, rdio_r.count()) 155 while True: 156 record_buf = list( 157 itertools.islice(reader, 0, batch_size)) 158 if not record_buf: 159 break 160 161 data = self._input_fn(record_buf) 162 163 with tf.GradientTape() as tape: 164 inputs = [] 165 for input_name in self._input_names: 166 inputs.append(data[input_name]) 167 if len(inputs) == 1: 168 inputs = inputs[0] 169 outputs = self._model.call(inputs, training=True) 170 loss = self._loss(outputs, data) 171 172 # Add regularization loss if any. 173 # Note: for distributed training, the regularization loss should 174 # be divided by the number of contributing workers, which 175 # might be difficult for elasticdl. 176 if self._model.losses: 177 loss += math_ops.add_n(self._model.losses) 178 grads = tape.gradient( 179 loss, self._model.trainable_variables) 180 optimizer.apply_gradients( 181 zip(grads, self._model.trainable_variables)) 182 print("Loss is ", loss.numpy()) 183 [end of elasticdl/worker/worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/edl_k8s_examples/mnist_model.py b/edl_k8s_examples/mnist_model.py --- a/edl_k8s_examples/mnist_model.py +++ b/edl_k8s_examples/mnist_model.py @@ -41,7 +41,7 @@ def loss(output, labels): return tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( - logits=output, labels=labels['label'])) + logits=output, labels=labels)) def optimizer(lr=0.1): return tf.train.GradientDescentOptimizer(lr) @@ -65,4 +65,4 @@ images = np.concatenate(image_list, axis=0) images = np.reshape(images, (batch_size, 28, 28)) labels = np.array(label_list) - return {'image': images, 'label': labels} + return ({'image': images}, labels) diff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py --- a/elasticdl/worker/worker.py +++ b/elasticdl/worker/worker.py @@ -108,7 +108,7 @@ self.get_model( max(self._model_version, min_model_version)) - batch_input_data = self._input_fn(record_buf) + batch_input_data, batch_label = self._input_fn(record_buf) with tf.GradientTape() as tape: inputs = [] @@ -117,7 +117,7 @@ if len(inputs) == 1: inputs = inputs[0] outputs = self._model.call(inputs, training=True) - loss = self._loss(outputs, batch_input_data) + loss = self._loss(outputs, batch_label) # TODO: Add regularization loss if any, # which should be divided by the number of contributing workers. @@ -158,7 +158,7 @@ if not record_buf: break - data = self._input_fn(record_buf) + data, labels = self._input_fn(record_buf) with tf.GradientTape() as tape: inputs = [] @@ -167,7 +167,7 @@ if len(inputs) == 1: inputs = inputs[0] outputs = self._model.call(inputs, training=True) - loss = self._loss(outputs, data) + loss = self._loss(outputs, labels) # Add regularization loss if any. # Note: for distributed training, the regularization loss should
{"golden_diff": "diff --git a/edl_k8s_examples/mnist_model.py b/edl_k8s_examples/mnist_model.py\n--- a/edl_k8s_examples/mnist_model.py\n+++ b/edl_k8s_examples/mnist_model.py\n@@ -41,7 +41,7 @@\n def loss(output, labels):\n return tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n- logits=output, labels=labels['label']))\n+ logits=output, labels=labels))\n \n def optimizer(lr=0.1):\n return tf.train.GradientDescentOptimizer(lr)\n@@ -65,4 +65,4 @@\n images = np.concatenate(image_list, axis=0)\n images = np.reshape(images, (batch_size, 28, 28))\n labels = np.array(label_list)\n- return {'image': images, 'label': labels}\n+ return ({'image': images}, labels)\ndiff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py\n--- a/elasticdl/worker/worker.py\n+++ b/elasticdl/worker/worker.py\n@@ -108,7 +108,7 @@\n self.get_model(\n max(self._model_version, min_model_version))\n \n- batch_input_data = self._input_fn(record_buf)\n+ batch_input_data, batch_label = self._input_fn(record_buf)\n \n with tf.GradientTape() as tape:\n inputs = []\n@@ -117,7 +117,7 @@\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs, training=True)\n- loss = self._loss(outputs, batch_input_data)\n+ loss = self._loss(outputs, batch_label)\n \n # TODO: Add regularization loss if any,\n # which should be divided by the number of contributing workers.\n@@ -158,7 +158,7 @@\n if not record_buf:\n break\n \n- data = self._input_fn(record_buf)\n+ data, labels = self._input_fn(record_buf)\n \n with tf.GradientTape() as tape:\n inputs = []\n@@ -167,7 +167,7 @@\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs, training=True)\n- loss = self._loss(outputs, data)\n+ loss = self._loss(outputs, labels)\n \n # Add regularization loss if any.\n # Note: for distributed training, the regularization loss should\n", "issue": "Simplify user API: don't rely on column name to find labels.\n\n", "before_files": [{"content": "import tensorflow as tf\nimport numpy as np\n\n\nclass MnistModel(tf.keras.Model):\n def __init__(self, channel_last=True):\n super(MnistModel, self).__init__(name='mnist_model')\n if channel_last:\n self._reshape = tf.keras.layers.Reshape((28, 28, 1))\n else:\n self._reshape = tf.keras.layers.Reshape((1, 28, 28))\n self._conv1 = tf.keras.layers.Conv2D(\n 32, kernel_size=(3, 3), activation='relu')\n self._conv2 = tf.keras.layers.Conv2D(\n 64, kernel_size=(3, 3), activation='relu')\n self._batch_norm = tf.keras.layers.BatchNormalization()\n self._maxpooling = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2))\n self._dropout = tf.keras.layers.Dropout(0.25)\n self._flatten = tf.keras.layers.Flatten()\n self._dense = tf.keras.layers.Dense(10)\n\n def call(self, inputs, training=False):\n x = self._reshape(inputs)\n x = self._conv1(x)\n x = self._conv2(x)\n x = self._batch_norm(x, training=training)\n x = self._maxpooling(x)\n if training:\n x = self._dropout(x, training=training)\n x = self._flatten(x)\n x = self._dense(x)\n return x\n\n\nmodel = MnistModel()\nmodel.build(input_shape=(1, 28, 28))\n\ninput_names = ['image']\n \ndef loss(output, labels):\n return tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=output, labels=labels['label']))\n\ndef optimizer(lr=0.1):\n return tf.train.GradientDescentOptimizer(lr)\n\ndef input_fn(records):\n image_list = []\n label_list = []\n # deserialize\n for r in records:\n parsed = np.frombuffer(r, dtype=\"uint8\")\n label = parsed[-1]\n image = np.resize(parsed[:-1], new_shape=(28, 
28))\n image = image.astype(np.float32)\n image /= 255\n label = label.astype(np.int32)\n image_list.append(image)\n label_list.append(label)\n\n # batching\n batch_size = len(image_list)\n images = np.concatenate(image_list, axis=0)\n images = np.reshape(images, (batch_size, 28, 28))\n labels = np.array(label_list)\n return {'image': images, 'label': labels}\n", "path": "edl_k8s_examples/mnist_model.py"}, {"content": "import tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom google.protobuf import empty_pb2\nfrom tensorflow.python.ops import math_ops\nfrom elasticdl.proto import master_pb2_grpc\nfrom elasticdl.proto import master_pb2\nfrom elasticdl.common.ndarray import ndarray_to_tensor, tensor_to_ndarray\nfrom elasticdl.common.model_helper import load_user_model\nimport itertools\nimport recordio\n\n# the default max number of a minibatch retrain as its gradients are not accepted by master.\nDEFAULT_MAX_MINIBATCH_RETRAIN_NUM = 64\n\nclass Worker(object):\n \"\"\"ElasticDL worker\"\"\"\n\n def __init__(self,\n model_file,\n channel=None,\n max_retrain_num=DEFAULT_MAX_MINIBATCH_RETRAIN_NUM):\n \"\"\"\n Arguments:\n model_module: A module to define the model\n channel: grpc channel\n max_retrain_num: max number of a minibatch retrain as its gradients are not accepted by master\n \"\"\"\n\n model_module = load_user_model(model_file)\n self._model = model_module.model\n self._input_fn = model_module.input_fn \n self._opt_fn = model_module.optimizer\n self._loss = model_module.loss\n self._input_names = model_module.input_names\n\n if channel is None:\n self._stub = None\n else:\n self._stub = master_pb2_grpc.MasterStub(channel)\n self._max_retrain_num = max_retrain_num\n self._model_version = -1\n\n def get_task(self):\n \"\"\"\n get task from master\n \"\"\"\n return self._stub.GetTask(empty_pb2.Empty())\n\n def get_model(self, min_version):\n \"\"\"\n get model from master, and update model_version\n \"\"\"\n req = master_pb2.GetModelRequest()\n req.min_version = min_version\n model = self._stub.GetModel(req)\n\n for var in self._model.trainable_variables:\n # Assumes all trainable variables exist in model.param.\n var.assign(\n tensor_to_ndarray(model.param[var.name]))\n self._model_version = model.version\n\n def report_task_result(self, task_id, err_msg):\n \"\"\"\n report task result to master\n \"\"\"\n report = master_pb2.ReportTaskResultRequest()\n report.task_id = task_id\n report.err_message = err_msg\n return self._stub.ReportTaskResult(report)\n\n def report_gradient(self, grads):\n \"\"\"\n report gradient to ps, return (accepted, model_version) from rpc call.\n \"\"\"\n req = master_pb2.ReportGradientRequest()\n for g, v in zip(grads, self._model.trainable_variables):\n req.gradient[v.name].CopyFrom(\n ndarray_to_tensor(g.numpy()))\n req.model_version = self._model_version\n res = self._stub.ReportGradient(req)\n return res.accepted, res.model_version\n\n def distributed_train(self):\n \"\"\"\n Distributed training.\n \"\"\"\n while True:\n task = self.get_task()\n if not task.shard_file_name:\n # No more task\n break\n batch_size = task.minibatch_size\n err_msg = \"\"\n try:\n with recordio.File(task.shard_file_name, \"r\") as rdio_r:\n reader = rdio_r.get_reader(task.start, task.end)\n min_model_version = task.model_version\n while True:\n record_buf = list(\n itertools.islice(reader, 0, batch_size))\n if not record_buf:\n break\n\n for _ in range(self._max_retrain_num):\n # TODO: optimize the logic to avoid unnecessary get_model call.\n self.get_model(\n 
max(self._model_version, min_model_version))\n\n batch_input_data = self._input_fn(record_buf)\n\n with tf.GradientTape() as tape:\n inputs = []\n for input_name in self._input_names:\n inputs.append(batch_input_data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs, training=True)\n loss = self._loss(outputs, batch_input_data)\n\n # TODO: Add regularization loss if any,\n # which should be divided by the number of contributing workers.\n grads = tape.gradient(\n loss, self._model.trainable_variables)\n print(\"Loss is \", loss.numpy())\n\n accepted, min_model_version = self.report_gradient(\n grads)\n if accepted:\n break\n else:\n # Worker got stuck, fail the task.\n # TODO: stop the worker if it fails to make any progress for some time.\n raise RuntimeError(\"Worker got stuck\")\n\n\n except Exception as ex:\n err_msg = str(ex)\n self.report_task_result(task.task_id, err_msg)\n\n def local_train(self, file_list, batch_size, epoch=1, kwargs=None):\n \"\"\"\n Local training for local testing. Must in eager mode.\n Argments:\n batch_size: batch size in training\n epoch: the number of epoch in training\n kwargs: contains a dict of parameters used in training\n \"\"\"\n optimizer = self._opt_fn()\n for _ in range(epoch):\n for f in file_list:\n with recordio.File(f, \"r\") as rdio_r:\n reader = rdio_r.get_reader(0, rdio_r.count())\n while True:\n record_buf = list(\n itertools.islice(reader, 0, batch_size))\n if not record_buf:\n break\n\n data = self._input_fn(record_buf)\n\n with tf.GradientTape() as tape:\n inputs = []\n for input_name in self._input_names:\n inputs.append(data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs, training=True)\n loss = self._loss(outputs, data)\n\n # Add regularization loss if any.\n # Note: for distributed training, the regularization loss should\n # be divided by the number of contributing workers, which\n # might be difficult for elasticdl.\n if self._model.losses:\n loss += math_ops.add_n(self._model.losses)\n grads = tape.gradient(\n loss, self._model.trainable_variables)\n optimizer.apply_gradients(\n zip(grads, self._model.trainable_variables))\n print(\"Loss is \", loss.numpy())\n", "path": "elasticdl/worker/worker.py"}]}
3,093
564
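The elasticdl change above simplifies the user API so that `input_fn` returns a `(features, labels)` pair and the worker no longer looks up `labels['label']` by column name. The following condensed sketch restates the patched MNIST `input_fn` contract; it is distilled from the record for illustration and is not the exact file contents.

```python
import numpy as np

def input_fn(records):
    """Post-patch contract: return (features_dict, labels) instead of a
    single dict that buries the labels under a 'label' key."""
    images, labels = [], []
    for r in records:
        parsed = np.frombuffer(r, dtype="uint8")
        labels.append(parsed[-1].astype(np.int32))
        images.append(np.resize(parsed[:-1], (28, 28)).astype(np.float32) / 255)
    batch = np.reshape(np.concatenate(images, axis=0), (len(images), 28, 28))
    return ({"image": batch}, np.array(labels))
```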
gh_patches_debug_33223
rasdani/github-patches
git_diff
lra__mackup-1292
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Mojave, new workstation Hi, I'm on a new workstation with Dropbox installed. I installed mackup with pip and ran 'mackup restore' and got this: Traceback (most recent call last): File "/usr/local/bin/mackup", line 9, in <module> load_entry_point('mackup==0.8.20', 'console_scripts', 'mackup')() File "/Library/Python/2.7/site-packages/mackup/main.py", line 65, in main app_db = ApplicationsDatabase() File "/Library/Python/2.7/site-packages/mackup/appsdb.py", line 63, in __init__ .format(xdg_config_home)) ValueError: $XDG_CONFIG_HOME: /Users/stephens/.config does not exist </issue> <code> [start of mackup/appsdb.py] 1 """ 2 The applications database. 3 4 The Applications Database provides an easy to use interface to load application 5 data from the Mackup Database (files). 6 """ 7 import os 8 9 try: 10 import configparser 11 except ImportError: 12 import ConfigParser as configparser 13 14 15 from .constants import APPS_DIR 16 from .constants import CUSTOM_APPS_DIR 17 18 19 class ApplicationsDatabase(object): 20 21 """Database containing all the configured applications.""" 22 23 def __init__(self): 24 """Create a ApplicationsDatabase instance.""" 25 # Build the dict that will contain the properties of each application 26 self.apps = dict() 27 28 for config_file in ApplicationsDatabase.get_config_files(): 29 config = configparser.SafeConfigParser(allow_no_value=True) 30 31 # Needed to not lowercase the configuration_files in the ini files 32 config.optionxform = str 33 34 if config.read(config_file): 35 # Get the filename without the directory name 36 filename = os.path.basename(config_file) 37 # The app name is the cfg filename with the extension 38 app_name = filename[:-len('.cfg')] 39 40 # Start building a dict for this app 41 self.apps[app_name] = dict() 42 43 # Add the fancy name for the app, for display purpose 44 app_pretty_name = config.get('application', 'name') 45 self.apps[app_name]['name'] = app_pretty_name 46 47 # Add the configuration files to sync 48 self.apps[app_name]['configuration_files'] = set() 49 if config.has_section('configuration_files'): 50 for path in config.options('configuration_files'): 51 if path.startswith('/'): 52 raise ValueError('Unsupported absolute path: {}' 53 .format(path)) 54 self.apps[app_name]['configuration_files'].add(path) 55 56 # Add the XDG configuration files to sync 57 home = os.path.expanduser('~/') 58 failobj = "{}.config".format(home) 59 xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj) 60 if xdg_config_home: 61 if not os.path.exists(xdg_config_home): 62 raise ValueError('$XDG_CONFIG_HOME: {} does not exist' 63 .format(xdg_config_home)) 64 if not xdg_config_home.startswith(home): 65 raise ValueError('$XDG_CONFIG_HOME: {} must be ' 66 'somewhere within your home ' 67 'directory: {}' 68 .format(xdg_config_home, home)) 69 if config.has_section('xdg_configuration_files'): 70 for path in config.options('xdg_configuration_files'): 71 if path.startswith('/'): 72 raise ValueError('Unsupported absolute path: ' 73 '{}' 74 .format(path)) 75 path = os.path.join(xdg_config_home, path) 76 path = path.replace(home, '') 77 (self.apps[app_name]['configuration_files'] 78 .add(path)) 79 80 @staticmethod 81 def get_config_files(): 82 """ 83 Return the application configuration files. 84 85 Return a list of configuration files describing the apps supported by 86 Mackup. The files return are absolute full path to those files. 87 e.g. 
/usr/lib/mackup/applications/bash.cfg 88 89 Only one config file per application should be returned, custom config 90 having a priority over stock config. 91 92 Returns: 93 set of strings. 94 """ 95 # Configure the config parser 96 apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 97 APPS_DIR) 98 custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR) 99 100 # List of stock application config files 101 config_files = set() 102 103 # Temp list of user added app config file names 104 custom_files = set() 105 106 # Get the list of custom application config files first 107 if os.path.isdir(custom_apps_dir): 108 for filename in os.listdir(custom_apps_dir): 109 if filename.endswith('.cfg'): 110 config_files.add(os.path.join(custom_apps_dir, 111 filename)) 112 # Also add it to the set of custom apps, so that we don't 113 # add the stock config for the same app too 114 custom_files.add(filename) 115 116 # Add the default provided app config files, but only if those are not 117 # customized, as we don't want to overwrite custom app config. 118 for filename in os.listdir(apps_dir): 119 if filename.endswith('.cfg') and filename not in custom_files: 120 config_files.add(os.path.join(apps_dir, filename)) 121 122 return config_files 123 124 def get_name(self, name): 125 """ 126 Return the fancy name of an application. 127 128 Args: 129 name (str) 130 131 Returns: 132 str 133 """ 134 return self.apps[name]['name'] 135 136 def get_files(self, name): 137 """ 138 Return the list of config files of an application. 139 140 Args: 141 name (str) 142 143 Returns: 144 set of str. 145 """ 146 return self.apps[name]['configuration_files'] 147 148 def get_app_names(self): 149 """ 150 Return application names. 151 152 Return the list of application names that are available in the 153 database. 154 155 Returns: 156 set of str. 157 """ 158 app_names = set() 159 for name in self.apps: 160 app_names.add(name) 161 162 return app_names 163 164 def get_pretty_app_names(self): 165 """ 166 Return the list of pretty app names that are available in the database. 167 168 Returns: 169 set of str. 170 """ 171 pretty_app_names = set() 172 for app_name in self.get_app_names(): 173 pretty_app_names.add(self.get_name(app_name)) 174 175 return pretty_app_names 176 [end of mackup/appsdb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mackup/appsdb.py b/mackup/appsdb.py --- a/mackup/appsdb.py +++ b/mackup/appsdb.py @@ -57,25 +57,21 @@ home = os.path.expanduser('~/') failobj = "{}.config".format(home) xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj) - if xdg_config_home: - if not os.path.exists(xdg_config_home): - raise ValueError('$XDG_CONFIG_HOME: {} does not exist' - .format(xdg_config_home)) - if not xdg_config_home.startswith(home): - raise ValueError('$XDG_CONFIG_HOME: {} must be ' - 'somewhere within your home ' - 'directory: {}' - .format(xdg_config_home, home)) - if config.has_section('xdg_configuration_files'): - for path in config.options('xdg_configuration_files'): - if path.startswith('/'): - raise ValueError('Unsupported absolute path: ' - '{}' - .format(path)) - path = os.path.join(xdg_config_home, path) - path = path.replace(home, '') - (self.apps[app_name]['configuration_files'] - .add(path)) + if not xdg_config_home.startswith(home): + raise ValueError('$XDG_CONFIG_HOME: {} must be ' + 'somewhere within your home ' + 'directory: {}' + .format(xdg_config_home, home)) + if config.has_section('xdg_configuration_files'): + for path in config.options('xdg_configuration_files'): + if path.startswith('/'): + raise ValueError('Unsupported absolute path: ' + '{}' + .format(path)) + path = os.path.join(xdg_config_home, path) + path = path.replace(home, '') + (self.apps[app_name]['configuration_files'] + .add(path)) @staticmethod def get_config_files():
{"golden_diff": "diff --git a/mackup/appsdb.py b/mackup/appsdb.py\n--- a/mackup/appsdb.py\n+++ b/mackup/appsdb.py\n@@ -57,25 +57,21 @@\n home = os.path.expanduser('~/')\n failobj = \"{}.config\".format(home)\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)\n- if xdg_config_home:\n- if not os.path.exists(xdg_config_home):\n- raise ValueError('$XDG_CONFIG_HOME: {} does not exist'\n- .format(xdg_config_home))\n- if not xdg_config_home.startswith(home):\n- raise ValueError('$XDG_CONFIG_HOME: {} must be '\n- 'somewhere within your home '\n- 'directory: {}'\n- .format(xdg_config_home, home))\n- if config.has_section('xdg_configuration_files'):\n- for path in config.options('xdg_configuration_files'):\n- if path.startswith('/'):\n- raise ValueError('Unsupported absolute path: '\n- '{}'\n- .format(path))\n- path = os.path.join(xdg_config_home, path)\n- path = path.replace(home, '')\n- (self.apps[app_name]['configuration_files']\n- .add(path))\n+ if not xdg_config_home.startswith(home):\n+ raise ValueError('$XDG_CONFIG_HOME: {} must be '\n+ 'somewhere within your home '\n+ 'directory: {}'\n+ .format(xdg_config_home, home))\n+ if config.has_section('xdg_configuration_files'):\n+ for path in config.options('xdg_configuration_files'):\n+ if path.startswith('/'):\n+ raise ValueError('Unsupported absolute path: '\n+ '{}'\n+ .format(path))\n+ path = os.path.join(xdg_config_home, path)\n+ path = path.replace(home, '')\n+ (self.apps[app_name]['configuration_files']\n+ .add(path))\n \n @staticmethod\n def get_config_files():\n", "issue": "Mojave, new workstation\nHi, I'm on a new workstation with Dropbox installed. I installed mackup with pip and ran 'mackup restore' and got this:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/mackup\", line 9, in <module>\r\n load_entry_point('mackup==0.8.20', 'console_scripts', 'mackup')()\r\n File \"/Library/Python/2.7/site-packages/mackup/main.py\", line 65, in main\r\n app_db = ApplicationsDatabase()\r\n File \"/Library/Python/2.7/site-packages/mackup/appsdb.py\", line 63, in __init__\r\n .format(xdg_config_home))\r\nValueError: $XDG_CONFIG_HOME: /Users/stephens/.config does not exist\n", "before_files": [{"content": "\"\"\"\nThe applications database.\n\nThe Applications Database provides an easy to use interface to load application\ndata from the Mackup Database (files).\n\"\"\"\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nfrom .constants import APPS_DIR\nfrom .constants import CUSTOM_APPS_DIR\n\n\nclass ApplicationsDatabase(object):\n\n \"\"\"Database containing all the configured applications.\"\"\"\n\n def __init__(self):\n \"\"\"Create a ApplicationsDatabase instance.\"\"\"\n # Build the dict that will contain the properties of each application\n self.apps = dict()\n\n for config_file in ApplicationsDatabase.get_config_files():\n config = configparser.SafeConfigParser(allow_no_value=True)\n\n # Needed to not lowercase the configuration_files in the ini files\n config.optionxform = str\n\n if config.read(config_file):\n # Get the filename without the directory name\n filename = os.path.basename(config_file)\n # The app name is the cfg filename with the extension\n app_name = filename[:-len('.cfg')]\n\n # Start building a dict for this app\n self.apps[app_name] = dict()\n\n # Add the fancy name for the app, for display purpose\n app_pretty_name = config.get('application', 'name')\n self.apps[app_name]['name'] = app_pretty_name\n\n # Add the configuration files to sync\n 
self.apps[app_name]['configuration_files'] = set()\n if config.has_section('configuration_files'):\n for path in config.options('configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: {}'\n .format(path))\n self.apps[app_name]['configuration_files'].add(path)\n\n # Add the XDG configuration files to sync\n home = os.path.expanduser('~/')\n failobj = \"{}.config\".format(home)\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME', failobj)\n if xdg_config_home:\n if not os.path.exists(xdg_config_home):\n raise ValueError('$XDG_CONFIG_HOME: {} does not exist'\n .format(xdg_config_home))\n if not xdg_config_home.startswith(home):\n raise ValueError('$XDG_CONFIG_HOME: {} must be '\n 'somewhere within your home '\n 'directory: {}'\n .format(xdg_config_home, home))\n if config.has_section('xdg_configuration_files'):\n for path in config.options('xdg_configuration_files'):\n if path.startswith('/'):\n raise ValueError('Unsupported absolute path: '\n '{}'\n .format(path))\n path = os.path.join(xdg_config_home, path)\n path = path.replace(home, '')\n (self.apps[app_name]['configuration_files']\n .add(path))\n\n @staticmethod\n def get_config_files():\n \"\"\"\n Return the application configuration files.\n\n Return a list of configuration files describing the apps supported by\n Mackup. The files return are absolute full path to those files.\n e.g. /usr/lib/mackup/applications/bash.cfg\n\n Only one config file per application should be returned, custom config\n having a priority over stock config.\n\n Returns:\n set of strings.\n \"\"\"\n # Configure the config parser\n apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n APPS_DIR)\n custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)\n\n # List of stock application config files\n config_files = set()\n\n # Temp list of user added app config file names\n custom_files = set()\n\n # Get the list of custom application config files first\n if os.path.isdir(custom_apps_dir):\n for filename in os.listdir(custom_apps_dir):\n if filename.endswith('.cfg'):\n config_files.add(os.path.join(custom_apps_dir,\n filename))\n # Also add it to the set of custom apps, so that we don't\n # add the stock config for the same app too\n custom_files.add(filename)\n\n # Add the default provided app config files, but only if those are not\n # customized, as we don't want to overwrite custom app config.\n for filename in os.listdir(apps_dir):\n if filename.endswith('.cfg') and filename not in custom_files:\n config_files.add(os.path.join(apps_dir, filename))\n\n return config_files\n\n def get_name(self, name):\n \"\"\"\n Return the fancy name of an application.\n\n Args:\n name (str)\n\n Returns:\n str\n \"\"\"\n return self.apps[name]['name']\n\n def get_files(self, name):\n \"\"\"\n Return the list of config files of an application.\n\n Args:\n name (str)\n\n Returns:\n set of str.\n \"\"\"\n return self.apps[name]['configuration_files']\n\n def get_app_names(self):\n \"\"\"\n Return application names.\n\n Return the list of application names that are available in the\n database.\n\n Returns:\n set of str.\n \"\"\"\n app_names = set()\n for name in self.apps:\n app_names.add(name)\n\n return app_names\n\n def get_pretty_app_names(self):\n \"\"\"\n Return the list of pretty app names that are available in the database.\n\n Returns:\n set of str.\n \"\"\"\n pretty_app_names = set()\n for app_name in self.get_app_names():\n pretty_app_names.add(self.get_name(app_name))\n\n return pretty_app_names\n", 
"path": "mackup/appsdb.py"}]}
num_tokens_prompt: 2,300
num_tokens_diff: 434

problem_id: gh_patches_debug_24043
source: rasdani/github-patches
task_type: git_diff
in_source_id: mozilla__telemetry-analysis-service-684
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better naming for scheduled jobs In #459 identifiers for adhoc clusters and scheduled jobs were changed to be randomly generated. This is fine for a single-use cluster, but makes notifications for scheduled jobs meaningless. For example, I received a notification today that job "fervent-snyder-2799" has failed. It is owned by someone else, but emails are cc'd to telemetry-alerts as well (general audience to monitor for important failures). I would prefer that the name for a scheduled job be reflective of what is being done in the job. Alternatively, we could ensure that the "description" field has been filled in and include that information in the notification emails. </issue> <code> [start of atmo/jobs/views.py] 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 import logging 5 6 from botocore.exceptions import ClientError 7 from django.conf import settings 8 from django.contrib.auth.decorators import login_required 9 from django.contrib import messages 10 from django.http import (HttpResponse, HttpResponseNotFound, 11 StreamingHttpResponse) 12 from django.shortcuts import redirect, render, get_object_or_404 13 from django.template.response import TemplateResponse 14 from django.utils import timezone 15 from django.utils.safestring import mark_safe 16 from django.utils.text import get_valid_filename 17 18 from .. import names 19 from ..clusters.models import EMRRelease 20 from ..decorators import (change_permission_required, 21 delete_permission_required, modified_date, 22 view_permission_required) 23 from ..models import next_field_value 24 from .forms import EditSparkJobForm, NewSparkJobForm, SparkJobAvailableForm 25 from .models import SparkJob 26 27 logger = logging.getLogger("django") 28 29 30 @login_required 31 def check_identifier_available(request): 32 """ 33 Given a Spark job identifier checks if one already exists. 34 """ 35 form = SparkJobAvailableForm(request.GET) 36 if form.is_valid(): 37 identifier = form.cleaned_data['identifier'] 38 if SparkJob.objects.filter(identifier=identifier).exists(): 39 response = HttpResponse('identifier unavailable') 40 else: 41 response = HttpResponseNotFound('identifier available') 42 else: 43 response = HttpResponseNotFound('identifier invalid') 44 return response 45 46 47 @login_required 48 def new_spark_job(request): 49 """ 50 View to schedule a new Spark job to run on AWS EMR. 
51 """ 52 identifier = names.random_scientist() 53 next_identifier = next_field_value(SparkJob, 'identifier', identifier) 54 initial = { 55 'identifier': next_identifier, 56 'size': 1, 57 'interval_in_hours': SparkJob.INTERVAL_WEEKLY, 58 'job_timeout': 24, 59 'start_date': timezone.now(), 60 'emr_release': EMRRelease.objects.stable().first(), 61 } 62 form = NewSparkJobForm(request.user, initial=initial) 63 if request.method == 'POST': 64 form = NewSparkJobForm( 65 request.user, 66 data=request.POST, 67 files=request.FILES, 68 initial=initial, 69 ) 70 if form.is_valid(): 71 # this will also magically create the spark job for us 72 spark_job = form.save() 73 return redirect(spark_job) 74 75 context = { 76 'form': form, 77 } 78 return render(request, 'atmo/jobs/new.html', context) 79 80 81 @login_required 82 @change_permission_required(SparkJob) 83 def edit_spark_job(request, id): 84 """ 85 View to edit a scheduled Spark job that runs on AWS EMR. 86 """ 87 spark_job = SparkJob.objects.get(pk=id) 88 form = EditSparkJobForm(request.user, instance=spark_job) 89 if request.method == 'POST': 90 form = EditSparkJobForm( 91 request.user, 92 data=request.POST, 93 files=request.FILES, 94 instance=spark_job, 95 ) 96 if form.is_valid(): 97 # this will also update the job for us 98 spark_job = form.save() 99 return redirect(spark_job) 100 context = { 101 'form': form, 102 } 103 return render(request, 'atmo/jobs/edit.html', context) 104 105 106 @login_required 107 @delete_permission_required(SparkJob) 108 def delete_spark_job(request, id): 109 """ 110 View to delete a scheduled Spark job and then redirects to the dashboard. 111 """ 112 spark_job = SparkJob.objects.get(pk=id) 113 if request.method == 'POST': 114 spark_job.delete() 115 return redirect('dashboard') 116 context = { 117 'spark_job': spark_job, 118 } 119 return render(request, 'atmo/jobs/delete.html', context=context) 120 121 122 @login_required 123 @view_permission_required(SparkJob) 124 @modified_date 125 def detail_spark_job(request, id): 126 """ 127 View to show the details for the scheduled Spark job with the given ID. 128 """ 129 spark_job = SparkJob.objects.get(pk=id) 130 context = { 131 'spark_job': spark_job, 132 } 133 if spark_job.latest_run: 134 context['modified_date'] = spark_job.latest_run.modified_at 135 return TemplateResponse(request, 'atmo/jobs/detail.html', context=context) 136 137 138 @login_required 139 @view_permission_required(SparkJob) 140 @modified_date 141 def detail_zeppelin_job(request, id): 142 """ 143 View to show the details for the scheduled Zeppelin job with the given ID. 144 """ 145 spark_job = get_object_or_404(SparkJob, pk=id) 146 response = '' 147 if spark_job.results: 148 markdown_url = ''.join([x for x in spark_job.results['data'] if x.endswith('md')]) 149 bucket = settings.AWS_CONFIG['PUBLIC_DATA_BUCKET'] 150 markdown_file = spark_job.provisioner.s3.get_object(Bucket=bucket, 151 Key=markdown_url) 152 response = markdown_file['Body'].read().decode('utf-8') 153 154 context = { 155 'markdown': response 156 } 157 return TemplateResponse(request, 'atmo/jobs/zeppelin_notebook.html', context=context) 158 159 160 @login_required 161 @view_permission_required(SparkJob) 162 def download_spark_job(request, id): 163 """ 164 Download the notebook file for the scheduled Spark job with the given ID. 
165 """ 166 spark_job = SparkJob.objects.get(pk=id) 167 response = StreamingHttpResponse( 168 spark_job.notebook_s3_object['Body'].read().decode('utf-8'), 169 content_type='application/x-ipynb+json', 170 ) 171 response['Content-Disposition'] = ( 172 'attachment; filename=%s' % 173 get_valid_filename(spark_job.notebook_name) 174 ) 175 response['Content-Length'] = spark_job.notebook_s3_object['ContentLength'] 176 return response 177 178 179 @login_required 180 @view_permission_required(SparkJob) 181 def run_spark_job(request, id): 182 """ 183 Run a scheduled Spark job right now, out of sync with its actual schedule. 184 185 This will actively ask for confirmation to run the Spark job. 186 """ 187 spark_job = SparkJob.objects.get(pk=id) 188 if not spark_job.is_runnable: 189 messages.error( 190 request, 191 mark_safe( 192 '<h4>Run now unavailable.</h4>' 193 "The Spark job can't be run manually at this time. Please try again later." 194 ) 195 ) 196 return redirect(spark_job) 197 198 if request.method == 'POST': 199 if spark_job.latest_run: 200 try: 201 spark_job.latest_run.sync() 202 except ClientError: 203 messages.error( 204 request, 205 mark_safe( 206 '<h4>Spark job API error</h4>' 207 "The Spark job can't be run at the moment since there was a " 208 "problem with fetching the status of the previous job run. " 209 "Please try again later." 210 ) 211 ) 212 return redirect(spark_job) 213 214 spark_job.run() 215 latest_run = spark_job.get_latest_run() 216 if latest_run: 217 schedule_entry = spark_job.schedule.get() 218 schedule_entry.reschedule( 219 last_run_at=spark_job.latest_run.scheduled_at, 220 ) 221 return redirect(spark_job) 222 223 context = { 224 'spark_job': spark_job, 225 } 226 return render(request, 'atmo/jobs/run.html', context=context) 227 [end of atmo/jobs/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/atmo/jobs/views.py b/atmo/jobs/views.py --- a/atmo/jobs/views.py +++ b/atmo/jobs/views.py @@ -14,12 +14,10 @@ from django.utils.safestring import mark_safe from django.utils.text import get_valid_filename -from .. import names from ..clusters.models import EMRRelease from ..decorators import (change_permission_required, delete_permission_required, modified_date, view_permission_required) -from ..models import next_field_value from .forms import EditSparkJobForm, NewSparkJobForm, SparkJobAvailableForm from .models import SparkJob @@ -48,10 +46,8 @@ """ View to schedule a new Spark job to run on AWS EMR. """ - identifier = names.random_scientist() - next_identifier = next_field_value(SparkJob, 'identifier', identifier) initial = { - 'identifier': next_identifier, + 'identifier': '', 'size': 1, 'interval_in_hours': SparkJob.INTERVAL_WEEKLY, 'job_timeout': 24,
{"golden_diff": "diff --git a/atmo/jobs/views.py b/atmo/jobs/views.py\n--- a/atmo/jobs/views.py\n+++ b/atmo/jobs/views.py\n@@ -14,12 +14,10 @@\n from django.utils.safestring import mark_safe\n from django.utils.text import get_valid_filename\n \n-from .. import names\n from ..clusters.models import EMRRelease\n from ..decorators import (change_permission_required,\n delete_permission_required, modified_date,\n view_permission_required)\n-from ..models import next_field_value\n from .forms import EditSparkJobForm, NewSparkJobForm, SparkJobAvailableForm\n from .models import SparkJob\n \n@@ -48,10 +46,8 @@\n \"\"\"\n View to schedule a new Spark job to run on AWS EMR.\n \"\"\"\n- identifier = names.random_scientist()\n- next_identifier = next_field_value(SparkJob, 'identifier', identifier)\n initial = {\n- 'identifier': next_identifier,\n+ 'identifier': '',\n 'size': 1,\n 'interval_in_hours': SparkJob.INTERVAL_WEEKLY,\n 'job_timeout': 24,\n", "issue": "Better naming for scheduled jobs\nIn #459 identifiers for adhoc clusters and scheduled jobs were changed to be randomly generated. This is fine for a single-use cluster, but makes notifications for scheduled jobs meaningless. \r\n\r\nFor example, I received a notification today that job \"fervent-snyder-2799\" has failed. It is owned by someone else, but emails are cc'd to telemetry-alerts as well (general audience to monitor for important failures).\r\n\r\nI would prefer that the name for a scheduled job be reflective of what is being done in the job. \r\n\r\nAlternatively, we could ensure that the \"description\" field has been filled in and include that information in the notification emails.\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nimport logging\n\nfrom botocore.exceptions import ClientError\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import (HttpResponse, HttpResponseNotFound,\n StreamingHttpResponse)\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import get_valid_filename\n\nfrom .. 
import names\nfrom ..clusters.models import EMRRelease\nfrom ..decorators import (change_permission_required,\n delete_permission_required, modified_date,\n view_permission_required)\nfrom ..models import next_field_value\nfrom .forms import EditSparkJobForm, NewSparkJobForm, SparkJobAvailableForm\nfrom .models import SparkJob\n\nlogger = logging.getLogger(\"django\")\n\n\n@login_required\ndef check_identifier_available(request):\n \"\"\"\n Given a Spark job identifier checks if one already exists.\n \"\"\"\n form = SparkJobAvailableForm(request.GET)\n if form.is_valid():\n identifier = form.cleaned_data['identifier']\n if SparkJob.objects.filter(identifier=identifier).exists():\n response = HttpResponse('identifier unavailable')\n else:\n response = HttpResponseNotFound('identifier available')\n else:\n response = HttpResponseNotFound('identifier invalid')\n return response\n\n\n@login_required\ndef new_spark_job(request):\n \"\"\"\n View to schedule a new Spark job to run on AWS EMR.\n \"\"\"\n identifier = names.random_scientist()\n next_identifier = next_field_value(SparkJob, 'identifier', identifier)\n initial = {\n 'identifier': next_identifier,\n 'size': 1,\n 'interval_in_hours': SparkJob.INTERVAL_WEEKLY,\n 'job_timeout': 24,\n 'start_date': timezone.now(),\n 'emr_release': EMRRelease.objects.stable().first(),\n }\n form = NewSparkJobForm(request.user, initial=initial)\n if request.method == 'POST':\n form = NewSparkJobForm(\n request.user,\n data=request.POST,\n files=request.FILES,\n initial=initial,\n )\n if form.is_valid():\n # this will also magically create the spark job for us\n spark_job = form.save()\n return redirect(spark_job)\n\n context = {\n 'form': form,\n }\n return render(request, 'atmo/jobs/new.html', context)\n\n\n@login_required\n@change_permission_required(SparkJob)\ndef edit_spark_job(request, id):\n \"\"\"\n View to edit a scheduled Spark job that runs on AWS EMR.\n \"\"\"\n spark_job = SparkJob.objects.get(pk=id)\n form = EditSparkJobForm(request.user, instance=spark_job)\n if request.method == 'POST':\n form = EditSparkJobForm(\n request.user,\n data=request.POST,\n files=request.FILES,\n instance=spark_job,\n )\n if form.is_valid():\n # this will also update the job for us\n spark_job = form.save()\n return redirect(spark_job)\n context = {\n 'form': form,\n }\n return render(request, 'atmo/jobs/edit.html', context)\n\n\n@login_required\n@delete_permission_required(SparkJob)\ndef delete_spark_job(request, id):\n \"\"\"\n View to delete a scheduled Spark job and then redirects to the dashboard.\n \"\"\"\n spark_job = SparkJob.objects.get(pk=id)\n if request.method == 'POST':\n spark_job.delete()\n return redirect('dashboard')\n context = {\n 'spark_job': spark_job,\n }\n return render(request, 'atmo/jobs/delete.html', context=context)\n\n\n@login_required\n@view_permission_required(SparkJob)\n@modified_date\ndef detail_spark_job(request, id):\n \"\"\"\n View to show the details for the scheduled Spark job with the given ID.\n \"\"\"\n spark_job = SparkJob.objects.get(pk=id)\n context = {\n 'spark_job': spark_job,\n }\n if spark_job.latest_run:\n context['modified_date'] = spark_job.latest_run.modified_at\n return TemplateResponse(request, 'atmo/jobs/detail.html', context=context)\n\n\n@login_required\n@view_permission_required(SparkJob)\n@modified_date\ndef detail_zeppelin_job(request, id):\n \"\"\"\n View to show the details for the scheduled Zeppelin job with the given ID.\n \"\"\"\n spark_job = get_object_or_404(SparkJob, pk=id)\n response = ''\n if 
spark_job.results:\n markdown_url = ''.join([x for x in spark_job.results['data'] if x.endswith('md')])\n bucket = settings.AWS_CONFIG['PUBLIC_DATA_BUCKET']\n markdown_file = spark_job.provisioner.s3.get_object(Bucket=bucket,\n Key=markdown_url)\n response = markdown_file['Body'].read().decode('utf-8')\n\n context = {\n 'markdown': response\n }\n return TemplateResponse(request, 'atmo/jobs/zeppelin_notebook.html', context=context)\n\n\n@login_required\n@view_permission_required(SparkJob)\ndef download_spark_job(request, id):\n \"\"\"\n Download the notebook file for the scheduled Spark job with the given ID.\n \"\"\"\n spark_job = SparkJob.objects.get(pk=id)\n response = StreamingHttpResponse(\n spark_job.notebook_s3_object['Body'].read().decode('utf-8'),\n content_type='application/x-ipynb+json',\n )\n response['Content-Disposition'] = (\n 'attachment; filename=%s' %\n get_valid_filename(spark_job.notebook_name)\n )\n response['Content-Length'] = spark_job.notebook_s3_object['ContentLength']\n return response\n\n\n@login_required\n@view_permission_required(SparkJob)\ndef run_spark_job(request, id):\n \"\"\"\n Run a scheduled Spark job right now, out of sync with its actual schedule.\n\n This will actively ask for confirmation to run the Spark job.\n \"\"\"\n spark_job = SparkJob.objects.get(pk=id)\n if not spark_job.is_runnable:\n messages.error(\n request,\n mark_safe(\n '<h4>Run now unavailable.</h4>'\n \"The Spark job can't be run manually at this time. Please try again later.\"\n )\n )\n return redirect(spark_job)\n\n if request.method == 'POST':\n if spark_job.latest_run:\n try:\n spark_job.latest_run.sync()\n except ClientError:\n messages.error(\n request,\n mark_safe(\n '<h4>Spark job API error</h4>'\n \"The Spark job can't be run at the moment since there was a \"\n \"problem with fetching the status of the previous job run. \"\n \"Please try again later.\"\n )\n )\n return redirect(spark_job)\n\n spark_job.run()\n latest_run = spark_job.get_latest_run()\n if latest_run:\n schedule_entry = spark_job.schedule.get()\n schedule_entry.reschedule(\n last_run_at=spark_job.latest_run.scheduled_at,\n )\n return redirect(spark_job)\n\n context = {\n 'spark_job': spark_job,\n }\n return render(request, 'atmo/jobs/run.html', context=context)\n", "path": "atmo/jobs/views.py"}]}
num_tokens_prompt: 2,848
num_tokens_diff: 245

problem_id: gh_patches_debug_102
source: rasdani/github-patches
task_type: git_diff
in_source_id: scipy__scipy-17210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: Build failure due to problems with shebang line in cythoner.py I ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine. Most files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`. Error message when running `python dev.py build`: ```shell Meson build setup OK 💻 ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build ninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build' [3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'. FAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c /bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found [12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o ninja: build stopped: subcommand failed. Build failed! ``` If I try running `cythoner.py` directly: ```shell -bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory ``` I'm using conda with WSL (Ubuntu). </issue> <code> [start of scipy/_build_utils/cythoner.py] 1 #!python3 2 """ Scipy variant of Cython command 3 4 Cython, as applied to single pyx file. 5 6 Expects two arguments, infile and outfile. 7 8 Other options passed through to cython command line parser. 9 """ 10 11 import os 12 import os.path as op 13 import sys 14 import subprocess as sbp 15 16 17 def main(): 18 in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3]) 19 20 sbp.run(['cython', '-3', '--fast-fail', 21 '--output-file', out_fname, 22 '--include-dir', os.getcwd()] + 23 sys.argv[3:] + [in_fname], 24 check=True) 25 26 27 if __name__ == '__main__': 28 main() 29 [end of scipy/_build_utils/cythoner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py --- a/scipy/_build_utils/cythoner.py +++ b/scipy/_build_utils/cythoner.py @@ -1,4 +1,4 @@ -#!python3 +#!/usr/bin/env python3 """ Scipy variant of Cython command Cython, as applied to single pyx file.
{"golden_diff": "diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py\n--- a/scipy/_build_utils/cythoner.py\n+++ b/scipy/_build_utils/cythoner.py\n@@ -1,4 +1,4 @@\n-#!python3\n+#!/usr/bin/env python3\n \"\"\" Scipy variant of Cython command\n \n Cython, as applied to single pyx file.\n", "issue": "BUG: Build failure due to problems with shebang line in cythoner.py\nI ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine.\r\n\r\nMost files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`.\r\n\r\nError message when running `python dev.py build`:\r\n\r\n```shell\r\nMeson build setup OK\r\n\ud83d\udcbb ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build\r\nninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build'\r\n[3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'.\r\nFAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found\r\n[12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o\r\nninja: build stopped: subcommand failed.\r\nBuild failed!\r\n```\r\n\r\nIf I try running `cythoner.py` directly:\r\n\r\n```shell\r\n-bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory\r\n```\r\n\r\nI'm using conda with WSL (Ubuntu).\n", "before_files": [{"content": "#!python3\n\"\"\" Scipy variant of Cython command\n\nCython, as applied to single pyx file.\n\nExpects two arguments, infile and outfile.\n\nOther options passed through to cython command line parser.\n\"\"\"\n\nimport os\nimport os.path as op\nimport sys\nimport subprocess as sbp\n\n\ndef main():\n in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])\n\n sbp.run(['cython', '-3', '--fast-fail',\n '--output-file', out_fname,\n '--include-dir', os.getcwd()] +\n sys.argv[3:] + [in_fname],\n check=True)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scipy/_build_utils/cythoner.py"}]}
num_tokens_prompt: 1,210
num_tokens_diff: 91

problem_id: gh_patches_debug_28238
source: rasdani/github-patches
task_type: git_diff
in_source_id: falconry__falcon-1785
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Custom media handlers: Unexpected issue when providing custom json handler This is in falcon-2.0 Look at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing: `extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application-json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header. While the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs. [1]: https://falcon.readthedocs.io/en/stable/api/media.html </issue> <code> [start of falcon/media/handlers.py] 1 from collections import UserDict 2 3 from falcon import errors 4 from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED 5 from falcon.media.json import JSONHandler 6 from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions 7 from falcon.media.urlencoded import URLEncodedFormHandler 8 from falcon.vendor import mimeparse 9 10 11 class Handlers(UserDict): 12 """A :class:`dict`-like object that manages Internet media type handlers.""" 13 def __init__(self, initial=None): 14 handlers = initial or { 15 'application/json': JSONHandler(), 16 'application/json; charset=UTF-8': JSONHandler(), 17 MEDIA_MULTIPART: MultipartFormHandler(), 18 MEDIA_URLENCODED: URLEncodedFormHandler(), 19 } 20 21 # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable. 22 # Also, this results in self.update(...) being called. 23 UserDict.__init__(self, handlers) 24 25 def _resolve_media_type(self, media_type, all_media_types): 26 resolved = None 27 28 try: 29 # NOTE(jmvrbanac): Mimeparse will return an empty string if it can 30 # parse the media type, but cannot find a suitable type. 31 resolved = mimeparse.best_match( 32 all_media_types, 33 media_type 34 ) 35 except ValueError: 36 pass 37 38 return resolved 39 40 def find_by_media_type(self, media_type, default): 41 # PERF(jmvrbanac): Check via a quick methods first for performance 42 if media_type == '*/*' or not media_type: 43 media_type = default 44 45 try: 46 return self.data[media_type] 47 except KeyError: 48 pass 49 50 # PERF(jmvrbanac): Fallback to the slower method 51 resolved = self._resolve_media_type(media_type, self.data.keys()) 52 53 if not resolved: 54 raise errors.HTTPUnsupportedMediaType( 55 description='{0} is an unsupported media type.'.format(media_type) 56 ) 57 58 return self.data[resolved] 59 60 61 # NOTE(vytas): An ugly way to work around circular imports. 62 MultipartParseOptions._DEFAULT_HANDLERS = Handlers({ 63 'application/json': JSONHandler(), 64 'application/json; charset=UTF-8': JSONHandler(), 65 MEDIA_URLENCODED: URLEncodedFormHandler(), 66 }) # type: ignore 67 [end of falcon/media/handlers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py --- a/falcon/media/handlers.py +++ b/falcon/media/handlers.py @@ -1,7 +1,7 @@ from collections import UserDict from falcon import errors -from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED +from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED from falcon.media.json import JSONHandler from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions from falcon.media.urlencoded import URLEncodedFormHandler @@ -12,8 +12,7 @@ """A :class:`dict`-like object that manages Internet media type handlers.""" def __init__(self, initial=None): handlers = initial or { - 'application/json': JSONHandler(), - 'application/json; charset=UTF-8': JSONHandler(), + MEDIA_JSON: JSONHandler(), MEDIA_MULTIPART: MultipartFormHandler(), MEDIA_URLENCODED: URLEncodedFormHandler(), } @@ -60,7 +59,6 @@ # NOTE(vytas): An ugly way to work around circular imports. MultipartParseOptions._DEFAULT_HANDLERS = Handlers({ - 'application/json': JSONHandler(), - 'application/json; charset=UTF-8': JSONHandler(), + MEDIA_JSON: JSONHandler(), MEDIA_URLENCODED: URLEncodedFormHandler(), }) # type: ignore
{"golden_diff": "diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py\n--- a/falcon/media/handlers.py\n+++ b/falcon/media/handlers.py\n@@ -1,7 +1,7 @@\n from collections import UserDict\n \n from falcon import errors\n-from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\n+from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED\n from falcon.media.json import JSONHandler\n from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\n from falcon.media.urlencoded import URLEncodedFormHandler\n@@ -12,8 +12,7 @@\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n@@ -60,7 +59,6 @@\n \n # NOTE(vytas): An ugly way to work around circular imports.\n MultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }) # type: ignore\n", "issue": "Custom media handlers: Unexpected issue when providing custom json handler\nThis is in falcon-2.0\r\n\r\nLook at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing:\r\n\r\n`extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application-json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header. \r\n\r\nWhile the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs. \r\n\r\n[1]: https://falcon.readthedocs.io/en/stable/api/media.html\n", "before_files": [{"content": "from collections import UserDict\n\nfrom falcon import errors\nfrom falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\nfrom falcon.media.json import JSONHandler\nfrom falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\nfrom falcon.media.urlencoded import URLEncodedFormHandler\nfrom falcon.vendor import mimeparse\n\n\nclass Handlers(UserDict):\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n\n # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.\n # Also, this results in self.update(...) 
being called.\n UserDict.__init__(self, handlers)\n\n def _resolve_media_type(self, media_type, all_media_types):\n resolved = None\n\n try:\n # NOTE(jmvrbanac): Mimeparse will return an empty string if it can\n # parse the media type, but cannot find a suitable type.\n resolved = mimeparse.best_match(\n all_media_types,\n media_type\n )\n except ValueError:\n pass\n\n return resolved\n\n def find_by_media_type(self, media_type, default):\n # PERF(jmvrbanac): Check via a quick methods first for performance\n if media_type == '*/*' or not media_type:\n media_type = default\n\n try:\n return self.data[media_type]\n except KeyError:\n pass\n\n # PERF(jmvrbanac): Fallback to the slower method\n resolved = self._resolve_media_type(media_type, self.data.keys())\n\n if not resolved:\n raise errors.HTTPUnsupportedMediaType(\n description='{0} is an unsupported media type.'.format(media_type)\n )\n\n return self.data[resolved]\n\n\n# NOTE(vytas): An ugly way to work around circular imports.\nMultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n}) # type: ignore\n", "path": "falcon/media/handlers.py"}]}
num_tokens_prompt: 1,350
num_tokens_diff: 340

problem_id: gh_patches_debug_42353
source: rasdani/github-patches
task_type: git_diff
in_source_id: plotly__plotly.py-82
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stream TypeError: 'str' does not support the buffer interface Hello. When I try [this](http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming_p1-first-stream.ipynb) code I get an error in rendering Cell number 11: ``` TypeError: 'str' does not support the buffer interface ``` I guess it's because I'm running `Python 3.4`. Since Python 3 there is necessary to treat bytes and unicodes differently. </issue> <code> [start of plotly/plotly/chunked_requests/chunked_request.py] 1 import time 2 import six 3 from six.moves import http_client 4 5 6 class Stream: 7 def __init__(self, server, port=80, headers={}): 8 ''' Initialize a stream object and an HTTP Connection 9 with chunked Transfer-Encoding to server:port with optional headers. 10 ''' 11 self.maxtries = 5 12 self._tries = 0 13 self._delay = 1 14 self._closed = False 15 self._server = server 16 self._port = port 17 self._headers = headers 18 self._connect() 19 20 def write(self, data, reconnect_on=('', 200, )): 21 ''' Send `data` to the server in chunk-encoded form. 22 Check the connection before writing and reconnect 23 if disconnected and if the response status code is in `reconnect_on`. 24 25 The response may either be an HTTPResponse object or an empty string. 26 ''' 27 28 if not self._isconnected(): 29 30 # Attempt to get the response. 31 response = self._getresponse() 32 33 # Reconnect depending on the status code. 34 if ((response == '' and '' in reconnect_on) or 35 (response and isinstance(response, http_client.HTTPResponse) and 36 response.status in reconnect_on)): 37 self._reconnect() 38 39 elif response and isinstance(response, http_client.HTTPResponse): 40 # If an HTTPResponse was recieved then 41 # make the users aware instead of 42 # auto-reconnecting in case the 43 # server is responding with an important 44 # message that might prevent 45 # future requests from going through, 46 # like Invalid Credentials. 47 # This allows the user to determine when 48 # to reconnect. 49 raise Exception("Server responded with " 50 "status code: {status_code}\n" 51 "and message: {msg}." 52 .format(status_code=response.status, 53 msg=response.read())) 54 55 elif response == '': 56 raise Exception("Attempted to write but socket " 57 "was not connected.") 58 59 try: 60 msg = data 61 msglen = format(len(msg), 'x') # msg length in hex 62 # Send the message in chunk-encoded form 63 self._conn.send('{msglen}\r\n{msg}\r\n' 64 .format(msglen=msglen, msg=msg)) 65 except http_client.socket.error: 66 self._reconnect() 67 self.write(data) 68 69 def _connect(self): 70 ''' Initialize an HTTP connection with chunked Transfer-Encoding 71 to server:port with optional headers. 72 ''' 73 server = self._server 74 port = self._port 75 headers = self._headers 76 self._conn = http_client.HTTPConnection(server, port) 77 78 self._conn.putrequest('POST', '/') 79 self._conn.putheader('Transfer-Encoding', 'chunked') 80 for header in headers: 81 self._conn.putheader(header, headers[header]) 82 self._conn.endheaders() 83 84 # Set blocking to False prevents recv 85 # from blocking while waiting for a response. 86 self._conn.sock.setblocking(False) 87 self._bytes = '' 88 self._reset_retries() 89 time.sleep(0.5) 90 91 def close(self): 92 ''' Close the connection to server. 93 94 If available, return a http_client.HTTPResponse object. 95 96 Closing the connection involves sending the 97 Transfer-Encoding terminating bytes. 
98 ''' 99 self._reset_retries() 100 self._closed = True 101 102 # Chunked-encoded posts are terminated with '0\r\n\r\n' 103 # For some reason, either Python or node.js seems to 104 # require an extra \r\n. 105 try: 106 self._conn.send('\r\n0\r\n\r\n') 107 except http_client.socket.error: 108 # In case the socket has already been closed 109 return '' 110 111 return self._getresponse() 112 113 def _getresponse(self): 114 ''' Read from recv and return a HTTPResponse object if possible. 115 Either 116 1 - The client has succesfully closed the connection: Return '' 117 2 - The server has already closed the connection: Return the response 118 if possible. 119 ''' 120 # Wait for a response 121 self._conn.sock.setblocking(True) 122 # Parse the response 123 response = self._bytes 124 while True: 125 try: 126 bytes = self._conn.sock.recv(1) 127 except http_client.socket.error: 128 # For error 54: Connection reset by peer 129 # (and perhaps others) 130 return '' 131 if bytes == '': 132 break 133 else: 134 response += bytes 135 # Set recv to be non-blocking again 136 self._conn.sock.setblocking(False) 137 138 # Convert the response string to a http_client.HTTPResponse 139 # object with a bit of a hack 140 if response != '': 141 # Taken from 142 # http://pythonwise.blogspot.ca/2010/02/parse-http-response.html 143 try: 144 response = http_client.HTTPResponse(_FakeSocket(response)) 145 response.begin() 146 except: 147 # Bad headers ... etc. 148 response = '' 149 return response 150 151 def _isconnected(self): 152 ''' Return True if the socket is still connected 153 to the server, False otherwise. 154 155 This check is done in 3 steps: 156 1 - Check if we have closed the connection 157 2 - Check if the original socket connection failed 158 3 - Check if the server has returned any data. If they have, 159 assume that the server closed the response after they sent 160 the data, i.e. that the data was the HTTP response. 161 ''' 162 163 # 1 - check if we've closed the connection. 164 if self._closed: 165 return False 166 167 # 2 - Check if the original socket connection failed 168 # If this failed, then no socket was initialized 169 if self._conn.sock is None: 170 return False 171 172 try: 173 # 3 - Check if the server has returned any data. 174 # If they have, then start to store the response 175 # in _bytes. 176 self._bytes = '' 177 self._bytes = self._conn.sock.recv(1) 178 return False 179 except http_client.socket.error as e: 180 # Check why recv failed 181 # Windows machines are the error codes 182 # that start with 1 183 # (http://msdn.microsoft.com/en-ca/library/windows/desktop/ms740668(v=vs.85).aspx) 184 if e.errno == 35 or e.errno == 10035: 185 # This is the "Resource temporarily unavailable" error 186 # which is thrown cuz there was nothing to receive, i.e. 187 # the server hasn't returned a response yet. 188 # This is a non-fatal error and the operation 189 # should be tried again. 190 # So, assume that the connection is still open. 191 return True 192 elif e.errno == 54 or e.errno == 10054: 193 # This is the "Connection reset by peer" error 194 # which is thrown cuz the server reset the 195 # socket, so the connection is closed. 196 return False 197 elif e.errno == 11: 198 # This is the "Resource temporarily unavailable" error 199 # which happens because the "operation would have blocked 200 # but nonblocking operation was requested". 
201 # We require non-blocking reading of this socket because 202 # we don't want to wait around for a response, we just 203 # want to see if a response is currently available. So 204 # let's just assume that we're still connected and 205 # hopefully recieve some data on the next try. 206 return True 207 else: 208 # Unknown scenario 209 raise e 210 211 def _reconnect(self): 212 ''' Connect if disconnected. 213 Retry self.maxtries times with delays 214 ''' 215 if not self._isconnected(): 216 try: 217 self._connect() 218 except http_client.socket.error as e: 219 # Attempt to reconnect if the connection was refused 220 if e.errno == 61 or e.errno == 10061: 221 # errno 61 is the "Connection Refused" error 222 time.sleep(self._delay) 223 self._delay += self._delay # fibonacii delays 224 self._tries += 1 225 if self._tries < self.maxtries: 226 self._reconnect() 227 else: 228 self._reset_retries() 229 raise e 230 else: 231 # Unknown scenario 232 raise e 233 234 # Reconnect worked - reset _closed 235 self._closed = False 236 237 def _reset_retries(self): 238 ''' Reset the connect counters and delays 239 ''' 240 self._tries = 0 241 self._delay = 1 242 243 244 class _FakeSocket(six.StringIO): 245 # Used to construct a http_client.HTTPResponse object 246 # from a string. 247 # Thx to: http://pythonwise.blogspot.ca/2010/02/parse-http-response.html 248 def makefile(self, *args, **kwargs): 249 return self 250 [end of plotly/plotly/chunked_requests/chunked_request.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plotly/plotly/chunked_requests/chunked_request.py b/plotly/plotly/chunked_requests/chunked_request.py --- a/plotly/plotly/chunked_requests/chunked_request.py +++ b/plotly/plotly/chunked_requests/chunked_request.py @@ -61,7 +61,7 @@ msglen = format(len(msg), 'x') # msg length in hex # Send the message in chunk-encoded form self._conn.send('{msglen}\r\n{msg}\r\n' - .format(msglen=msglen, msg=msg)) + .format(msglen=msglen, msg=msg).encode('utf-8')) except http_client.socket.error: self._reconnect() self.write(data) @@ -84,7 +84,7 @@ # Set blocking to False prevents recv # from blocking while waiting for a response. self._conn.sock.setblocking(False) - self._bytes = '' + self._bytes = six.b('') self._reset_retries() time.sleep(0.5) @@ -103,7 +103,7 @@ # For some reason, either Python or node.js seems to # require an extra \r\n. try: - self._conn.send('\r\n0\r\n\r\n') + self._conn.send('\r\n0\r\n\r\n'.encode('utf-8')) except http_client.socket.error: # In case the socket has already been closed return '' @@ -123,21 +123,21 @@ response = self._bytes while True: try: - bytes = self._conn.sock.recv(1) + _bytes = self._conn.sock.recv(1) except http_client.socket.error: # For error 54: Connection reset by peer # (and perhaps others) - return '' - if bytes == '': + return six.b('') + if _bytes == six.b(''): break else: - response += bytes + response += _bytes # Set recv to be non-blocking again self._conn.sock.setblocking(False) # Convert the response string to a http_client.HTTPResponse # object with a bit of a hack - if response != '': + if response != six.b(''): # Taken from # http://pythonwise.blogspot.ca/2010/02/parse-http-response.html try: @@ -145,7 +145,7 @@ response.begin() except: # Bad headers ... etc. - response = '' + response = six.b('') return response def _isconnected(self): @@ -173,7 +173,7 @@ # 3 - Check if the server has returned any data. # If they have, then start to store the response # in _bytes. - self._bytes = '' + self._bytes = six.b('') self._bytes = self._conn.sock.recv(1) return False except http_client.socket.error as e:
{"golden_diff": "diff --git a/plotly/plotly/chunked_requests/chunked_request.py b/plotly/plotly/chunked_requests/chunked_request.py\n--- a/plotly/plotly/chunked_requests/chunked_request.py\n+++ b/plotly/plotly/chunked_requests/chunked_request.py\n@@ -61,7 +61,7 @@\n msglen = format(len(msg), 'x') # msg length in hex\n # Send the message in chunk-encoded form\n self._conn.send('{msglen}\\r\\n{msg}\\r\\n'\n- .format(msglen=msglen, msg=msg))\n+ .format(msglen=msglen, msg=msg).encode('utf-8'))\n except http_client.socket.error:\n self._reconnect()\n self.write(data)\n@@ -84,7 +84,7 @@\n # Set blocking to False prevents recv\n # from blocking while waiting for a response.\n self._conn.sock.setblocking(False)\n- self._bytes = ''\n+ self._bytes = six.b('')\n self._reset_retries()\n time.sleep(0.5)\n \n@@ -103,7 +103,7 @@\n # For some reason, either Python or node.js seems to\n # require an extra \\r\\n.\n try:\n- self._conn.send('\\r\\n0\\r\\n\\r\\n')\n+ self._conn.send('\\r\\n0\\r\\n\\r\\n'.encode('utf-8'))\n except http_client.socket.error:\n # In case the socket has already been closed\n return ''\n@@ -123,21 +123,21 @@\n response = self._bytes\n while True:\n try:\n- bytes = self._conn.sock.recv(1)\n+ _bytes = self._conn.sock.recv(1)\n except http_client.socket.error:\n # For error 54: Connection reset by peer\n # (and perhaps others)\n- return ''\n- if bytes == '':\n+ return six.b('')\n+ if _bytes == six.b(''):\n break\n else:\n- response += bytes\n+ response += _bytes\n # Set recv to be non-blocking again\n self._conn.sock.setblocking(False)\n \n # Convert the response string to a http_client.HTTPResponse\n # object with a bit of a hack\n- if response != '':\n+ if response != six.b(''):\n # Taken from\n # http://pythonwise.blogspot.ca/2010/02/parse-http-response.html\n try:\n@@ -145,7 +145,7 @@\n response.begin()\n except:\n # Bad headers ... etc.\n- response = ''\n+ response = six.b('')\n return response\n \n def _isconnected(self):\n@@ -173,7 +173,7 @@\n # 3 - Check if the server has returned any data.\n # If they have, then start to store the response\n # in _bytes.\n- self._bytes = ''\n+ self._bytes = six.b('')\n self._bytes = self._conn.sock.recv(1)\n return False\n except http_client.socket.error as e:\n", "issue": "Stream TypeError: 'str' does not support the buffer interface\nHello.\n\nWhen I try [this](http://nbviewer.ipython.org/github/plotly/python-user-guide/blob/master/s7_streaming/s7_streaming_p1-first-stream.ipynb) code I get an error in rendering Cell number 11:\n\n```\nTypeError: 'str' does not support the buffer interface\n```\n\nI guess it's because I'm running `Python 3.4`. 
Since Python 3 there is necessary to treat bytes and unicodes differently.\n\n", "before_files": [{"content": "import time\nimport six\nfrom six.moves import http_client\n\n\nclass Stream:\n def __init__(self, server, port=80, headers={}):\n ''' Initialize a stream object and an HTTP Connection\n with chunked Transfer-Encoding to server:port with optional headers.\n '''\n self.maxtries = 5\n self._tries = 0\n self._delay = 1\n self._closed = False\n self._server = server\n self._port = port\n self._headers = headers\n self._connect()\n\n def write(self, data, reconnect_on=('', 200, )):\n ''' Send `data` to the server in chunk-encoded form.\n Check the connection before writing and reconnect\n if disconnected and if the response status code is in `reconnect_on`.\n\n The response may either be an HTTPResponse object or an empty string.\n '''\n\n if not self._isconnected():\n\n # Attempt to get the response.\n response = self._getresponse()\n\n # Reconnect depending on the status code.\n if ((response == '' and '' in reconnect_on) or\n (response and isinstance(response, http_client.HTTPResponse) and\n response.status in reconnect_on)):\n self._reconnect()\n\n elif response and isinstance(response, http_client.HTTPResponse):\n # If an HTTPResponse was recieved then\n # make the users aware instead of\n # auto-reconnecting in case the\n # server is responding with an important\n # message that might prevent\n # future requests from going through,\n # like Invalid Credentials.\n # This allows the user to determine when\n # to reconnect.\n raise Exception(\"Server responded with \"\n \"status code: {status_code}\\n\"\n \"and message: {msg}.\"\n .format(status_code=response.status,\n msg=response.read()))\n\n elif response == '':\n raise Exception(\"Attempted to write but socket \"\n \"was not connected.\")\n\n try:\n msg = data\n msglen = format(len(msg), 'x') # msg length in hex\n # Send the message in chunk-encoded form\n self._conn.send('{msglen}\\r\\n{msg}\\r\\n'\n .format(msglen=msglen, msg=msg))\n except http_client.socket.error:\n self._reconnect()\n self.write(data)\n\n def _connect(self):\n ''' Initialize an HTTP connection with chunked Transfer-Encoding\n to server:port with optional headers.\n '''\n server = self._server\n port = self._port\n headers = self._headers\n self._conn = http_client.HTTPConnection(server, port)\n\n self._conn.putrequest('POST', '/')\n self._conn.putheader('Transfer-Encoding', 'chunked')\n for header in headers:\n self._conn.putheader(header, headers[header])\n self._conn.endheaders()\n\n # Set blocking to False prevents recv\n # from blocking while waiting for a response.\n self._conn.sock.setblocking(False)\n self._bytes = ''\n self._reset_retries()\n time.sleep(0.5)\n\n def close(self):\n ''' Close the connection to server.\n\n If available, return a http_client.HTTPResponse object.\n\n Closing the connection involves sending the\n Transfer-Encoding terminating bytes.\n '''\n self._reset_retries()\n self._closed = True\n\n # Chunked-encoded posts are terminated with '0\\r\\n\\r\\n'\n # For some reason, either Python or node.js seems to\n # require an extra \\r\\n.\n try:\n self._conn.send('\\r\\n0\\r\\n\\r\\n')\n except http_client.socket.error:\n # In case the socket has already been closed\n return ''\n\n return self._getresponse()\n\n def _getresponse(self):\n ''' Read from recv and return a HTTPResponse object if possible.\n Either\n 1 - The client has succesfully closed the connection: Return ''\n 2 - The server has already closed the connection: 
Return the response\n if possible.\n '''\n # Wait for a response\n self._conn.sock.setblocking(True)\n # Parse the response\n response = self._bytes\n while True:\n try:\n bytes = self._conn.sock.recv(1)\n except http_client.socket.error:\n # For error 54: Connection reset by peer\n # (and perhaps others)\n return ''\n if bytes == '':\n break\n else:\n response += bytes\n # Set recv to be non-blocking again\n self._conn.sock.setblocking(False)\n\n # Convert the response string to a http_client.HTTPResponse\n # object with a bit of a hack\n if response != '':\n # Taken from\n # http://pythonwise.blogspot.ca/2010/02/parse-http-response.html\n try:\n response = http_client.HTTPResponse(_FakeSocket(response))\n response.begin()\n except:\n # Bad headers ... etc.\n response = ''\n return response\n\n def _isconnected(self):\n ''' Return True if the socket is still connected\n to the server, False otherwise.\n\n This check is done in 3 steps:\n 1 - Check if we have closed the connection\n 2 - Check if the original socket connection failed\n 3 - Check if the server has returned any data. If they have,\n assume that the server closed the response after they sent\n the data, i.e. that the data was the HTTP response.\n '''\n\n # 1 - check if we've closed the connection.\n if self._closed:\n return False\n\n # 2 - Check if the original socket connection failed\n # If this failed, then no socket was initialized\n if self._conn.sock is None:\n return False\n\n try:\n # 3 - Check if the server has returned any data.\n # If they have, then start to store the response\n # in _bytes.\n self._bytes = ''\n self._bytes = self._conn.sock.recv(1)\n return False\n except http_client.socket.error as e:\n # Check why recv failed\n # Windows machines are the error codes\n # that start with 1\n # (http://msdn.microsoft.com/en-ca/library/windows/desktop/ms740668(v=vs.85).aspx)\n if e.errno == 35 or e.errno == 10035:\n # This is the \"Resource temporarily unavailable\" error\n # which is thrown cuz there was nothing to receive, i.e.\n # the server hasn't returned a response yet.\n # This is a non-fatal error and the operation\n # should be tried again.\n # So, assume that the connection is still open.\n return True\n elif e.errno == 54 or e.errno == 10054:\n # This is the \"Connection reset by peer\" error\n # which is thrown cuz the server reset the\n # socket, so the connection is closed.\n return False\n elif e.errno == 11:\n # This is the \"Resource temporarily unavailable\" error\n # which happens because the \"operation would have blocked\n # but nonblocking operation was requested\".\n # We require non-blocking reading of this socket because\n # we don't want to wait around for a response, we just\n # want to see if a response is currently available. 
So\n # let's just assume that we're still connected and\n # hopefully recieve some data on the next try.\n return True\n else:\n # Unknown scenario\n raise e\n\n def _reconnect(self):\n ''' Connect if disconnected.\n Retry self.maxtries times with delays\n '''\n if not self._isconnected():\n try:\n self._connect()\n except http_client.socket.error as e:\n # Attempt to reconnect if the connection was refused\n if e.errno == 61 or e.errno == 10061:\n # errno 61 is the \"Connection Refused\" error\n time.sleep(self._delay)\n self._delay += self._delay # fibonacii delays\n self._tries += 1\n if self._tries < self.maxtries:\n self._reconnect()\n else:\n self._reset_retries()\n raise e\n else:\n # Unknown scenario\n raise e\n\n # Reconnect worked - reset _closed\n self._closed = False\n\n def _reset_retries(self):\n ''' Reset the connect counters and delays\n '''\n self._tries = 0\n self._delay = 1\n\n\nclass _FakeSocket(six.StringIO):\n # Used to construct a http_client.HTTPResponse object\n # from a string.\n # Thx to: http://pythonwise.blogspot.ca/2010/02/parse-http-response.html\n def makefile(self, *args, **kwargs):\n return self\n", "path": "plotly/plotly/chunked_requests/chunked_request.py"}]}
3,258
701
gh_patches_debug_33292
rasdani/github-patches
git_diff
dask__dask-1391
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error requirements starting dask-scheduler on HPC cluster Hi I installed dask on python3 via pip3 on HPC node. when I run the scheduler I get the following error: File "/home/x_simco/test/lib/python3.4/site-packages/dask/compatibility.py", line 23, in <module> from lzmaffi import (LZMAFile, compress as lzma_compress, ImportError: No module named 'lzmaffi' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/x_simco/test/bin/dask-scheduler", line 7, in <module> from distributed.cli.dask_scheduler import go File "/home/x_simco/test/lib/python3.4/site-packages/distributed/**init**.py", line 3, in <module> from .center import Center File "/home/x_simco/test/lib/python3.4/site-packages/distributed/center.py", line 13, in <module> from .core import (Server, read, write, rpc, pingpong, send_recv, File "/home/x_simco/test/lib/python3.4/site-packages/distributed/core.py", line 26, in <module> from .utils import get_traceback, truncate_exception, ignoring File "/home/x_simco/test/lib/python3.4/site-packages/distributed/utils.py", line 15, in <module> from dask import istask File "/home/x_simco/test/lib/python3.4/site-packages/dask/**init**.py", line 5, in <module> from .async import get_sync as get File "/home/x_simco/test/lib/python3.4/site-packages/dask/async.py", line 126, in <module> from .optimize import cull File "/home/x_simco/test/lib/python3.4/site-packages/dask/optimize.py", line 6, in <module> from .compatibility import zip_longest File "/home/x_simco/test/lib/python3.4/site-packages/dask/compatibility.py", line 26, in <module> from lzma import (LZMAFile, compress as lzma_compress, File "/software/apps/python/3.4.1/snic-1/lib/python3.4/lzma.py", line 26, in <module> from _lzma import * </issue> <code> [start of dask/compatibility.py] 1 from __future__ import absolute_import, division, print_function 2 3 import functools 4 import inspect 5 import operator 6 import sys 7 import types 8 9 PY3 = sys.version_info[0] == 3 10 PY2 = sys.version_info[0] == 2 11 12 LZMA_AVAILABLE = True 13 14 if PY3: 15 import builtins 16 from queue import Queue, Empty 17 from itertools import zip_longest 18 from io import StringIO, BytesIO 19 from bz2 import BZ2File 20 from gzip import (GzipFile, compress as gzip_compress, 21 decompress as gzip_decompress) 22 try: 23 from lzmaffi import (LZMAFile, compress as lzma_compress, 24 decompress as lzma_decompress) 25 except ImportError: 26 from lzma import (LZMAFile, compress as lzma_compress, 27 decompress as lzma_decompress) 28 from urllib.request import urlopen 29 from urllib.parse import urlparse 30 from urllib.parse import quote, unquote 31 FileNotFoundError = FileNotFoundError 32 unicode = str 33 long = int 34 zip = zip 35 def apply(func, args, kwargs=None): 36 if kwargs: 37 return func(*args, **kwargs) 38 else: 39 return func(*args) 40 range = range 41 reduce = functools.reduce 42 operator_div = operator.truediv 43 44 def _getargspec(func): 45 return inspect.getfullargspec(func) 46 47 else: 48 import __builtin__ as builtins 49 from Queue import Queue, Empty 50 from itertools import izip_longest as zip_longest, izip as zip 51 from StringIO import StringIO 52 from io import BytesIO, BufferedIOBase 53 import bz2 54 import gzip 55 from urllib2 import urlopen 56 from urlparse import urlparse 57 from urllib import quote, unquote 58 unicode = unicode 59 long = long 60 apply = apply 61 range = xrange 62 
reduce = reduce 63 operator_div = operator.div 64 FileNotFoundError = IOError 65 66 def _getargspec(func): 67 return inspect.getargspec(func) 68 69 def gzip_decompress(b): 70 f = gzip.GzipFile(fileobj=BytesIO(b)) 71 result = f.read() 72 f.close() 73 return result 74 75 def gzip_compress(b): 76 bio = BytesIO() 77 f = gzip.GzipFile(fileobj=bio, mode='w') 78 f.write(b) 79 f.close() 80 bio.seek(0) 81 result = bio.read() 82 return result 83 84 if sys.version_info[1] <= 7: 85 class BZ2File(BufferedIOBase): 86 def __init__(self, *args, **kwargs): 87 self.__obj = bz2.BZ2File(*args, **kwargs) 88 89 def close(self): 90 return self.__obj.close() 91 92 @property 93 def closed(self): 94 return self.__obj.closed 95 96 def flush(self): 97 pass 98 99 def isatty(self): 100 return self.__obj.isatty() 101 102 def read(self, *args, **kwargs): 103 return self.__obj.read(*args, **kwargs) 104 105 def read1(self, *args, **kwargs): 106 return self.__obj.read(*args, **kwargs) 107 108 def readable(self): 109 return 'r' in self.__obj.mode 110 111 def readline(self, *args, **kwargs): 112 return self.__obj.readline(*args, **kwargs) 113 114 def readlines(self, *args, **kwargs): 115 return self.__obj.readlines(*args, **kwargs) 116 117 def seek(self, *args, **kwargs): 118 self.__obj.seek(*args, **kwargs) 119 return self.tell() 120 121 def seekable(self): 122 return self.readable() 123 124 def tell(self): 125 return self.__obj.tell() 126 127 def truncate(self, *args, **kwargs): 128 return self.__obj.truncate(*args, **kwargs) 129 130 def writable(self): 131 return 'w' in self.__obj.mode 132 133 def write(self, *args, **kwargs): 134 return self.__obj.write(*args, **kwargs) 135 136 def writelines(self, *args, **kwargs): 137 return self.__obj.writelines(*args, **kwargs) 138 else: 139 BZ2File = bz2.BZ2File 140 141 if sys.version_info[1] <= 6: 142 class GzipFile(BufferedIOBase): 143 def __init__(self, *args, **kwargs): 144 self.__obj = gzip.GzipFile(*args, **kwargs) 145 146 def close(self): 147 return self.__obj.close() 148 149 @property 150 def closed(self): 151 return self.__obj.fileobj is None 152 153 def flush(self, *args, **kwargs): 154 return self.__obj.flush(*args, **kwargs) 155 156 def isatty(self): 157 return self.__obj.isatty() 158 159 def read(self, *args, **kwargs): 160 return self.__obj.read(*args, **kwargs) 161 162 def read1(self, *args, **kwargs): 163 return self.__obj.read(*args, **kwargs) 164 165 def readable(self): 166 return self.__obj.mode == gzip.READ 167 168 def readline(self, *args, **kwargs): 169 return self.__obj.readline(*args, **kwargs) 170 171 def readlines(self, *args, **kwargs): 172 return self.__obj.readlines(*args, **kwargs) 173 174 def seek(self, *args, **kwargs): 175 self.__obj.seek(*args, **kwargs) 176 return self.tell() 177 178 def seekable(self): 179 # See https://hg.python.org/cpython/file/2.7/Lib/gzip.py#l421 180 return True 181 182 def tell(self): 183 return self.__obj.tell() 184 185 def truncate(self, *args, **kwargs): 186 return self.__obj.truncate(*args, **kwargs) 187 188 def writable(self): 189 return self.__obj.mode == gzip.WRITE 190 191 def write(self, *args, **kwargs): 192 return self.__obj.write(*args, **kwargs) 193 194 def writelines(self, *args, **kwargs): 195 return self.__obj.writelines(*args, **kwargs) 196 else: 197 GzipFile = gzip.GzipFile 198 199 try: 200 try: 201 from lzmaffi import (LZMAFile, compress as lzma_compress, 202 decompress as lzma_decompress) 203 except ImportError: 204 from backports.lzma import LZMAFile 205 from backports.lzma import (LZMAFile, compress as 
lzma_compress, 206 decompress as lzma_decompress) 207 except ImportError: 208 class LZMAFile: 209 def __init__(self, *args, **kwargs): 210 raise ValueError("xz files requires the lzma module. " 211 "To use, install lzmaffi or backports.lzma.") 212 LZMA_AVAILABLE = False 213 214 215 def getargspec(func): 216 """Version of inspect.getargspec that works for functools.partial objects""" 217 if isinstance(func, functools.partial): 218 return _getargspec(func.func) 219 else: 220 if isinstance(func, type): 221 return _getargspec(func.__init__) 222 else: 223 return _getargspec(func) 224 225 def skip(func): 226 return 227 228 229 def bind_method(cls, name, func): 230 """Bind a method to class 231 232 Parameters 233 ---------- 234 235 cls : type 236 class to receive bound method 237 name : basestring 238 name of method on class instance 239 func : function 240 function to be bound as method 241 242 Returns 243 ------- 244 None 245 """ 246 # only python 2 has bound/unbound method issue 247 if not PY3: 248 setattr(cls, name, types.MethodType(func, None, cls)) 249 else: 250 setattr(cls, name, func) 251 [end of dask/compatibility.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dask/compatibility.py b/dask/compatibility.py --- a/dask/compatibility.py +++ b/dask/compatibility.py @@ -9,7 +9,11 @@ PY3 = sys.version_info[0] == 3 PY2 = sys.version_info[0] == 2 -LZMA_AVAILABLE = True +class LZMAFile: + def __init__(self, *args, **kwargs): + raise ValueError("xz files requires the lzma module. " + "To use, install lzmaffi or backports.lzma.") +LZMA_AVAILABLE = False if PY3: import builtins @@ -20,11 +24,17 @@ from gzip import (GzipFile, compress as gzip_compress, decompress as gzip_decompress) try: - from lzmaffi import (LZMAFile, compress as lzma_compress, - decompress as lzma_decompress) + try: + from lzmaffi import (LZMAFile, compress as lzma_compress, + decompress as lzma_decompress) + except ImportError: + from lzma import (LZMAFile, compress as lzma_compress, + decompress as lzma_decompress) + LZMA_AVAILABLE = True except ImportError: - from lzma import (LZMAFile, compress as lzma_compress, - decompress as lzma_decompress) + # Fallback to top-level definition + pass + from urllib.request import urlopen from urllib.parse import urlparse from urllib.parse import quote, unquote @@ -204,12 +214,10 @@ from backports.lzma import LZMAFile from backports.lzma import (LZMAFile, compress as lzma_compress, decompress as lzma_decompress) + LZMA_AVAILABLE = True except ImportError: - class LZMAFile: - def __init__(self, *args, **kwargs): - raise ValueError("xz files requires the lzma module. " - "To use, install lzmaffi or backports.lzma.") - LZMA_AVAILABLE = False + # Fallback to top-level definition + pass def getargspec(func):
{"golden_diff": "diff --git a/dask/compatibility.py b/dask/compatibility.py\n--- a/dask/compatibility.py\n+++ b/dask/compatibility.py\n@@ -9,7 +9,11 @@\n PY3 = sys.version_info[0] == 3\n PY2 = sys.version_info[0] == 2\n \n-LZMA_AVAILABLE = True\n+class LZMAFile:\n+ def __init__(self, *args, **kwargs):\n+ raise ValueError(\"xz files requires the lzma module. \"\n+ \"To use, install lzmaffi or backports.lzma.\")\n+LZMA_AVAILABLE = False\n \n if PY3:\n import builtins\n@@ -20,11 +24,17 @@\n from gzip import (GzipFile, compress as gzip_compress,\n decompress as gzip_decompress)\n try:\n- from lzmaffi import (LZMAFile, compress as lzma_compress,\n- decompress as lzma_decompress)\n+ try:\n+ from lzmaffi import (LZMAFile, compress as lzma_compress,\n+ decompress as lzma_decompress)\n+ except ImportError:\n+ from lzma import (LZMAFile, compress as lzma_compress,\n+ decompress as lzma_decompress)\n+ LZMA_AVAILABLE = True\n except ImportError:\n- from lzma import (LZMAFile, compress as lzma_compress,\n- decompress as lzma_decompress)\n+ # Fallback to top-level definition\n+ pass\n+\n from urllib.request import urlopen\n from urllib.parse import urlparse\n from urllib.parse import quote, unquote\n@@ -204,12 +214,10 @@\n from backports.lzma import LZMAFile\n from backports.lzma import (LZMAFile, compress as lzma_compress,\n decompress as lzma_decompress)\n+ LZMA_AVAILABLE = True\n except ImportError:\n- class LZMAFile:\n- def __init__(self, *args, **kwargs):\n- raise ValueError(\"xz files requires the lzma module. \"\n- \"To use, install lzmaffi or backports.lzma.\")\n- LZMA_AVAILABLE = False\n+ # Fallback to top-level definition\n+ pass\n \n \n def getargspec(func):\n", "issue": "Error requirements starting dask-scheduler on HPC cluster\nHi\nI installed dask on python3 via pip3 on HPC node. 
when I run the scheduler I get the following error:\n\nFile \"/home/x_simco/test/lib/python3.4/site-packages/dask/compatibility.py\", line 23, in <module>\n from lzmaffi import (LZMAFile, compress as lzma_compress,\nImportError: No module named 'lzmaffi'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/home/x_simco/test/bin/dask-scheduler\", line 7, in <module>\n from distributed.cli.dask_scheduler import go\n File \"/home/x_simco/test/lib/python3.4/site-packages/distributed/**init**.py\", line 3, in <module>\n from .center import Center\n File \"/home/x_simco/test/lib/python3.4/site-packages/distributed/center.py\", line 13, in <module>\n from .core import (Server, read, write, rpc, pingpong, send_recv,\n File \"/home/x_simco/test/lib/python3.4/site-packages/distributed/core.py\", line 26, in <module>\n from .utils import get_traceback, truncate_exception, ignoring\n File \"/home/x_simco/test/lib/python3.4/site-packages/distributed/utils.py\", line 15, in <module>\n from dask import istask\n File \"/home/x_simco/test/lib/python3.4/site-packages/dask/**init**.py\", line 5, in <module>\n from .async import get_sync as get\n File \"/home/x_simco/test/lib/python3.4/site-packages/dask/async.py\", line 126, in <module>\n from .optimize import cull\n File \"/home/x_simco/test/lib/python3.4/site-packages/dask/optimize.py\", line 6, in <module>\n from .compatibility import zip_longest\n File \"/home/x_simco/test/lib/python3.4/site-packages/dask/compatibility.py\", line 26, in <module>\n from lzma import (LZMAFile, compress as lzma_compress,\n File \"/software/apps/python/3.4.1/snic-1/lib/python3.4/lzma.py\", line 26, in <module>\n from _lzma import *\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport functools\nimport inspect\nimport operator\nimport sys\nimport types\n\nPY3 = sys.version_info[0] == 3\nPY2 = sys.version_info[0] == 2\n\nLZMA_AVAILABLE = True\n\nif PY3:\n import builtins\n from queue import Queue, Empty\n from itertools import zip_longest\n from io import StringIO, BytesIO\n from bz2 import BZ2File\n from gzip import (GzipFile, compress as gzip_compress,\n decompress as gzip_decompress)\n try:\n from lzmaffi import (LZMAFile, compress as lzma_compress,\n decompress as lzma_decompress)\n except ImportError:\n from lzma import (LZMAFile, compress as lzma_compress,\n decompress as lzma_decompress)\n from urllib.request import urlopen\n from urllib.parse import urlparse\n from urllib.parse import quote, unquote\n FileNotFoundError = FileNotFoundError\n unicode = str\n long = int\n zip = zip\n def apply(func, args, kwargs=None):\n if kwargs:\n return func(*args, **kwargs)\n else:\n return func(*args)\n range = range\n reduce = functools.reduce\n operator_div = operator.truediv\n\n def _getargspec(func):\n return inspect.getfullargspec(func)\n\nelse:\n import __builtin__ as builtins\n from Queue import Queue, Empty\n from itertools import izip_longest as zip_longest, izip as zip\n from StringIO import StringIO\n from io import BytesIO, BufferedIOBase\n import bz2\n import gzip\n from urllib2 import urlopen\n from urlparse import urlparse\n from urllib import quote, unquote\n unicode = unicode\n long = long\n apply = apply\n range = xrange\n reduce = reduce\n operator_div = operator.div\n FileNotFoundError = IOError\n\n def _getargspec(func):\n return inspect.getargspec(func)\n\n def gzip_decompress(b):\n f = gzip.GzipFile(fileobj=BytesIO(b))\n result = 
f.read()\n f.close()\n return result\n\n def gzip_compress(b):\n bio = BytesIO()\n f = gzip.GzipFile(fileobj=bio, mode='w')\n f.write(b)\n f.close()\n bio.seek(0)\n result = bio.read()\n return result\n\n if sys.version_info[1] <= 7:\n class BZ2File(BufferedIOBase):\n def __init__(self, *args, **kwargs):\n self.__obj = bz2.BZ2File(*args, **kwargs)\n\n def close(self):\n return self.__obj.close()\n\n @property\n def closed(self):\n return self.__obj.closed\n\n def flush(self):\n pass\n\n def isatty(self):\n return self.__obj.isatty()\n\n def read(self, *args, **kwargs):\n return self.__obj.read(*args, **kwargs)\n\n def read1(self, *args, **kwargs):\n return self.__obj.read(*args, **kwargs)\n\n def readable(self):\n return 'r' in self.__obj.mode\n\n def readline(self, *args, **kwargs):\n return self.__obj.readline(*args, **kwargs)\n\n def readlines(self, *args, **kwargs):\n return self.__obj.readlines(*args, **kwargs)\n\n def seek(self, *args, **kwargs):\n self.__obj.seek(*args, **kwargs)\n return self.tell()\n\n def seekable(self):\n return self.readable()\n\n def tell(self):\n return self.__obj.tell()\n\n def truncate(self, *args, **kwargs):\n return self.__obj.truncate(*args, **kwargs)\n\n def writable(self):\n return 'w' in self.__obj.mode\n\n def write(self, *args, **kwargs):\n return self.__obj.write(*args, **kwargs)\n\n def writelines(self, *args, **kwargs):\n return self.__obj.writelines(*args, **kwargs)\n else:\n BZ2File = bz2.BZ2File\n\n if sys.version_info[1] <= 6:\n class GzipFile(BufferedIOBase):\n def __init__(self, *args, **kwargs):\n self.__obj = gzip.GzipFile(*args, **kwargs)\n\n def close(self):\n return self.__obj.close()\n\n @property\n def closed(self):\n return self.__obj.fileobj is None\n\n def flush(self, *args, **kwargs):\n return self.__obj.flush(*args, **kwargs)\n\n def isatty(self):\n return self.__obj.isatty()\n\n def read(self, *args, **kwargs):\n return self.__obj.read(*args, **kwargs)\n\n def read1(self, *args, **kwargs):\n return self.__obj.read(*args, **kwargs)\n\n def readable(self):\n return self.__obj.mode == gzip.READ\n\n def readline(self, *args, **kwargs):\n return self.__obj.readline(*args, **kwargs)\n\n def readlines(self, *args, **kwargs):\n return self.__obj.readlines(*args, **kwargs)\n\n def seek(self, *args, **kwargs):\n self.__obj.seek(*args, **kwargs)\n return self.tell()\n\n def seekable(self):\n # See https://hg.python.org/cpython/file/2.7/Lib/gzip.py#l421\n return True\n\n def tell(self):\n return self.__obj.tell()\n\n def truncate(self, *args, **kwargs):\n return self.__obj.truncate(*args, **kwargs)\n\n def writable(self):\n return self.__obj.mode == gzip.WRITE\n\n def write(self, *args, **kwargs):\n return self.__obj.write(*args, **kwargs)\n\n def writelines(self, *args, **kwargs):\n return self.__obj.writelines(*args, **kwargs)\n else:\n GzipFile = gzip.GzipFile\n\n try:\n try:\n from lzmaffi import (LZMAFile, compress as lzma_compress,\n decompress as lzma_decompress)\n except ImportError:\n from backports.lzma import LZMAFile\n from backports.lzma import (LZMAFile, compress as lzma_compress,\n decompress as lzma_decompress)\n except ImportError:\n class LZMAFile:\n def __init__(self, *args, **kwargs):\n raise ValueError(\"xz files requires the lzma module. 
\"\n \"To use, install lzmaffi or backports.lzma.\")\n LZMA_AVAILABLE = False\n\n\ndef getargspec(func):\n \"\"\"Version of inspect.getargspec that works for functools.partial objects\"\"\"\n if isinstance(func, functools.partial):\n return _getargspec(func.func)\n else:\n if isinstance(func, type):\n return _getargspec(func.__init__)\n else:\n return _getargspec(func)\n\ndef skip(func):\n return\n\n\ndef bind_method(cls, name, func):\n \"\"\"Bind a method to class\n\n Parameters\n ----------\n\n cls : type\n class to receive bound method\n name : basestring\n name of method on class instance\n func : function\n function to be bound as method\n\n Returns\n -------\n None\n \"\"\"\n # only python 2 has bound/unbound method issue\n if not PY3:\n setattr(cls, name, types.MethodType(func, None, cls))\n else:\n setattr(cls, name, func)\n", "path": "dask/compatibility.py"}]}
3,387
512
gh_patches_debug_29595
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-7414
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Log level `DEPRECATION` is documented but not working ## Description of the issue Log level `DEPRECATION` is documented but not working. ### Context information (for bug reports) * Output of `pyinstaller --version`: ```5.7.0``` * Version of Python: 3.10.6 * Platform: Ubuntu 22.04 * How you installed Python: apt * Did you also try this on another platform? Does it work there? yes, same thing * try the latest development version, using the following command: yes, same thing ### A minimal example program which shows the error ``` $ pyinstaller --help | grep -U1 DEPREC --log-level LEVEL Amount of detail in build-time console messages. LEVEL may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION, ERROR, CRITICAL (default: INFO). Also settable via and $ pyinstaller --log-level DEPRECATION . [...] pyinstaller: error: Unknown log level `DEPRECATION` ``` </issue> <code> [start of PyInstaller/log.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2023, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 """ 12 Logging module for PyInstaller. 13 """ 14 15 __all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION'] 16 17 import os 18 import logging 19 from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger 20 21 TRACE = logging.TRACE = DEBUG - 5 22 logging.addLevelName(TRACE, 'TRACE') 23 DEPRECATION = WARN + 5 24 logging.addLevelName(DEPRECATION, 'DEPRECATION') 25 LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL') 26 27 FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s' 28 _env_level = os.environ.get("PYI_LOG_LEVEL", "INFO") 29 try: 30 level = getattr(logging, _env_level.upper()) 31 except AttributeError: 32 raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.") 33 logging.basicConfig(format=FORMAT, level=level) 34 logger = getLogger('PyInstaller') 35 36 37 def __add_options(parser): 38 parser.add_argument( 39 '--log-level', 40 choices=LEVELS, 41 metavar="LEVEL", 42 dest='loglevel', 43 help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). ' 44 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS), 45 ) 46 47 48 def __process_options(parser, opts): 49 if opts.loglevel: 50 try: 51 level = opts.loglevel.upper() 52 _level = getattr(logging, level) 53 except AttributeError: 54 parser.error('Unknown log level `%s`' % opts.loglevel) 55 logger.setLevel(_level) 56 os.environ["PYI_LOG_LEVEL"] = level 57 [end of PyInstaller/log.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/log.py b/PyInstaller/log.py --- a/PyInstaller/log.py +++ b/PyInstaller/log.py @@ -18,18 +18,26 @@ import logging from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger -TRACE = logging.TRACE = DEBUG - 5 +TRACE = DEBUG - 5 logging.addLevelName(TRACE, 'TRACE') DEPRECATION = WARN + 5 logging.addLevelName(DEPRECATION, 'DEPRECATION') -LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL') +LEVELS = { + 'TRACE': TRACE, + 'DEBUG': DEBUG, + 'INFO': INFO, + 'WARN': WARN, + 'DEPRECATION': DEPRECATION, + 'ERROR': ERROR, + 'FATAL': FATAL, +} FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s' _env_level = os.environ.get("PYI_LOG_LEVEL", "INFO") try: - level = getattr(logging, _env_level.upper()) -except AttributeError: - raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.") + level = LEVELS[_env_level.upper()] +except KeyError: + raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.") logging.basicConfig(format=FORMAT, level=level) logger = getLogger('PyInstaller') @@ -49,8 +57,8 @@ if opts.loglevel: try: level = opts.loglevel.upper() - _level = getattr(logging, level) - except AttributeError: + _level = LEVELS[level] + except KeyError: parser.error('Unknown log level `%s`' % opts.loglevel) logger.setLevel(_level) os.environ["PYI_LOG_LEVEL"] = level
{"golden_diff": "diff --git a/PyInstaller/log.py b/PyInstaller/log.py\n--- a/PyInstaller/log.py\n+++ b/PyInstaller/log.py\n@@ -18,18 +18,26 @@\n import logging\n from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n \n-TRACE = logging.TRACE = DEBUG - 5\n+TRACE = DEBUG - 5\n logging.addLevelName(TRACE, 'TRACE')\n DEPRECATION = WARN + 5\n logging.addLevelName(DEPRECATION, 'DEPRECATION')\n-LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n+LEVELS = {\n+ 'TRACE': TRACE,\n+ 'DEBUG': DEBUG,\n+ 'INFO': INFO,\n+ 'WARN': WARN,\n+ 'DEPRECATION': DEPRECATION,\n+ 'ERROR': ERROR,\n+ 'FATAL': FATAL,\n+}\n \n FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n _env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\n try:\n- level = getattr(logging, _env_level.upper())\n-except AttributeError:\n- raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\n+ level = LEVELS[_env_level.upper()]\n+except KeyError:\n+ raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.\")\n logging.basicConfig(format=FORMAT, level=level)\n logger = getLogger('PyInstaller')\n \n@@ -49,8 +57,8 @@\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n- _level = getattr(logging, level)\n- except AttributeError:\n+ _level = LEVELS[level]\n+ except KeyError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "issue": "Log level `DEPRECATION` is documented but not working\n## Description of the issue\r\n\r\nLog level `DEPRECATION` is documented but not working.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.7.0```\r\n* Version of Python: 3.10.6\r\n* Platform: Ubuntu 22.04\r\n* How you installed Python: apt\r\n* Did you also try this on another platform? Does it work there? yes, same thing\r\n* try the latest development version, using the following command: yes, same thing\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\n$ pyinstaller --help | grep -U1 DEPREC\r\n --log-level LEVEL Amount of detail in build-time console messages. LEVEL\r\n may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION,\r\n ERROR, CRITICAL (default: INFO). 
Also settable via and\r\n$ pyinstaller --log-level DEPRECATION .\r\n[...]\r\npyinstaller: error: Unknown log level `DEPRECATION`\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nLogging module for PyInstaller.\n\"\"\"\n\n__all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']\n\nimport os\nimport logging\nfrom logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n\nTRACE = logging.TRACE = DEBUG - 5\nlogging.addLevelName(TRACE, 'TRACE')\nDEPRECATION = WARN + 5\nlogging.addLevelName(DEPRECATION, 'DEPRECATION')\nLEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n\nFORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n_env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\ntry:\n level = getattr(logging, _env_level.upper())\nexcept AttributeError:\n raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\nlogging.basicConfig(format=FORMAT, level=level)\nlogger = getLogger('PyInstaller')\n\n\ndef __add_options(parser):\n parser.add_argument(\n '--log-level',\n choices=LEVELS,\n metavar=\"LEVEL\",\n dest='loglevel',\n help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '\n 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),\n )\n\n\ndef __process_options(parser, opts):\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n _level = getattr(logging, level)\n except AttributeError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "path": "PyInstaller/log.py"}]}
1,347
438
gh_patches_debug_9449
rasdani/github-patches
git_diff
mirumee__ariadne-523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove superfluous schema validation It turns out that `validate_schema` called in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here. In the other hand, `assert_validate_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40 which is sufficient. Fixes #523 </issue> <code> [start of ariadne/executable_schema.py] 1 from typing import Dict, List, Type, Union 2 3 from graphql import ( 4 GraphQLSchema, 5 assert_valid_schema, 6 build_ast_schema, 7 parse, 8 validate_schema, 9 ) 10 11 from .enums import set_default_enum_values_on_schema 12 from .schema_visitor import SchemaDirectiveVisitor 13 from .types import SchemaBindable 14 15 16 def make_executable_schema( 17 type_defs: Union[str, List[str]], 18 *bindables: Union[SchemaBindable, List[SchemaBindable]], 19 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None, 20 ) -> GraphQLSchema: 21 if isinstance(type_defs, list): 22 type_defs = join_type_defs(type_defs) 23 24 ast_document = parse(type_defs) 25 schema = build_ast_schema(ast_document) 26 validate_schema(schema) 27 28 for bindable in bindables: 29 if isinstance(bindable, list): 30 for obj in bindable: 31 obj.bind_to_schema(schema) 32 else: 33 bindable.bind_to_schema(schema) 34 35 set_default_enum_values_on_schema(schema) 36 37 if directives: 38 SchemaDirectiveVisitor.visit_schema_directives(schema, directives) 39 40 assert_valid_schema(schema) 41 42 return schema 43 44 45 def join_type_defs(type_defs: List[str]) -> str: 46 return "\n\n".join(t.strip() for t in type_defs) 47 [end of ariadne/executable_schema.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py --- a/ariadne/executable_schema.py +++ b/ariadne/executable_schema.py @@ -5,7 +5,6 @@ assert_valid_schema, build_ast_schema, parse, - validate_schema, ) from .enums import set_default_enum_values_on_schema @@ -23,7 +22,6 @@ ast_document = parse(type_defs) schema = build_ast_schema(ast_document) - validate_schema(schema) for bindable in bindables: if isinstance(bindable, list):
{"golden_diff": "diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -5,7 +5,6 @@\n assert_valid_schema,\n build_ast_schema,\n parse,\n- validate_schema,\n )\n \n from .enums import set_default_enum_values_on_schema\n@@ -23,7 +22,6 @@\n \n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n- validate_schema(schema)\n \n for bindable in bindables:\n if isinstance(bindable, list):\n", "issue": "Remove superfluous schema validation\nIt turns out that `validate_schema` called in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here. \r\nIn the other hand, `assert_validate_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40 which is sufficient. \r\n\r\nFixes #523 \n", "before_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n parse,\n validate_schema,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n validate_schema(schema)\n\n for bindable in bindables:\n if isinstance(bindable, list):\n for obj in bindable:\n obj.bind_to_schema(schema)\n else:\n bindable.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n", "path": "ariadne/executable_schema.py"}]}
998
138
gh_patches_debug_51325
rasdani/github-patches
git_diff
scikit-image__scikit-image-6307
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Undefined names in Python code found with flake8 ## Description ## Way to reproduce [flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1 $ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__ ``` ./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts' for a in dir(luts): ^ ./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__ __all__ = ['python_to_notebook', 'Notebook'] ^ 1 F821 undefined name 'luts' 1 F822 undefined name 'python_to_notebook' in __all__ 2 ``` __E901,E999,F821,F822,F823__ are the "_showstopper_" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues which are merely "style violations" -- useful for readability but they do not effect runtime safety. * F821: undefined name `name` * F822: undefined name `name` in `__all__` * F823: local variable name referenced before assignment * E901: SyntaxError or IndentationError * E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree </issue> <code> [start of doc/ext/notebook_doc.py] 1 __all__ = ['python_to_notebook', 'Notebook'] 2 3 import json 4 import copy 5 import warnings 6 7 8 # Skeleton notebook in JSON format 9 skeleton_nb = """{ 10 "metadata": { 11 "name":"" 12 }, 13 "nbformat": 3, 14 "nbformat_minor": 0, 15 "worksheets": [ 16 { 17 "cells": [ 18 { 19 "cell_type": "code", 20 "collapsed": false, 21 "input": [ 22 "%matplotlib inline" 23 ], 24 "language": "python", 25 "metadata": {}, 26 "outputs": [] 27 } 28 ], 29 "metadata": {} 30 } 31 ] 32 }""" 33 34 35 class Notebook(object): 36 """ 37 Notebook object for building an IPython notebook cell-by-cell. 38 """ 39 40 def __init__(self): 41 # cell type code 42 self.cell_code = { 43 'cell_type': 'code', 44 'collapsed': False, 45 'input': [ 46 '# Code Goes Here' 47 ], 48 'language': 'python', 49 'metadata': {}, 50 'outputs': [] 51 } 52 53 # cell type markdown 54 self.cell_md = { 55 'cell_type': 'markdown', 56 'metadata': {}, 57 'source': [ 58 'Markdown Goes Here' 59 ] 60 } 61 62 self.template = json.loads(skeleton_nb) 63 self.cell_type = {'input': self.cell_code, 'source': self.cell_md} 64 self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'} 65 66 def add_cell(self, value, cell_type='code'): 67 """Add a notebook cell. 68 69 Parameters 70 ---------- 71 value : str 72 Cell content. 73 cell_type : {'code', 'markdown'} 74 Type of content (default is 'code'). 75 76 """ 77 if cell_type in ['markdown', 'code']: 78 key = self.valuetype_to_celltype[cell_type] 79 cells = self.template['worksheets'][0]['cells'] 80 cells.append(copy.deepcopy(self.cell_type[key])) 81 # assign value to the last cell 82 cells[-1][key] = value 83 else: 84 warnings.warn('Ignoring unsupported cell type (%s)' % cell_type) 85 86 def json(self): 87 """Return a JSON representation of the notebook. 88 89 Returns 90 ------- 91 str 92 JSON notebook. 93 94 """ 95 return json.dumps(self.template, indent=2) 96 97 98 [end of doc/ext/notebook_doc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py --- a/doc/ext/notebook_doc.py +++ b/doc/ext/notebook_doc.py @@ -1,4 +1,4 @@ -__all__ = ['python_to_notebook', 'Notebook'] +__all__ = ['Notebook'] import json import copy
{"golden_diff": "diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py\n--- a/doc/ext/notebook_doc.py\n+++ b/doc/ext/notebook_doc.py\n@@ -1,4 +1,4 @@\n-__all__ = ['python_to_notebook', 'Notebook']\n+__all__ = ['Notebook']\n \n import json\n import copy\n", "issue": "Undefined names in Python code found with flake8\n## Description\r\n\r\n\r\n## Way to reproduce\r\n[flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1\r\n\r\n$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__\r\n```\r\n./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts'\r\n for a in dir(luts):\r\n ^\r\n./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__\r\n__all__ = ['python_to_notebook', 'Notebook']\r\n^\r\n1 F821 undefined name 'luts'\r\n1 F822 undefined name 'python_to_notebook' in __all__\r\n2\r\n```\r\n__E901,E999,F821,F822,F823__ are the \"_showstopper_\" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues which are merely \"style violations\" -- useful for readability but they do not effect runtime safety.\r\n* F821: undefined name `name`\r\n* F822: undefined name `name` in `__all__`\r\n* F823: local variable name referenced before assignment\r\n* E901: SyntaxError or IndentationError\r\n* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree\r\n\n", "before_files": [{"content": "__all__ = ['python_to_notebook', 'Notebook']\n\nimport json\nimport copy\nimport warnings\n\n\n# Skeleton notebook in JSON format\nskeleton_nb = \"\"\"{\n \"metadata\": {\n \"name\":\"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"%matplotlib inline\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\"\n\n\nclass Notebook(object):\n \"\"\"\n Notebook object for building an IPython notebook cell-by-cell.\n \"\"\"\n\n def __init__(self):\n # cell type code\n self.cell_code = {\n 'cell_type': 'code',\n 'collapsed': False,\n 'input': [\n '# Code Goes Here'\n ],\n 'language': 'python',\n 'metadata': {},\n 'outputs': []\n }\n\n # cell type markdown\n self.cell_md = {\n 'cell_type': 'markdown',\n 'metadata': {},\n 'source': [\n 'Markdown Goes Here'\n ]\n }\n\n self.template = json.loads(skeleton_nb)\n self.cell_type = {'input': self.cell_code, 'source': self.cell_md}\n self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}\n\n def add_cell(self, value, cell_type='code'):\n \"\"\"Add a notebook cell.\n\n Parameters\n ----------\n value : str\n Cell content.\n cell_type : {'code', 'markdown'}\n Type of content (default is 'code').\n\n \"\"\"\n if cell_type in ['markdown', 'code']:\n key = self.valuetype_to_celltype[cell_type]\n cells = self.template['worksheets'][0]['cells']\n cells.append(copy.deepcopy(self.cell_type[key]))\n # assign value to the last cell\n cells[-1][key] = value\n else:\n warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)\n\n def json(self):\n \"\"\"Return a JSON representation of the notebook.\n\n Returns\n -------\n str\n JSON notebook.\n\n \"\"\"\n return json.dumps(self.template, indent=2)\n\n\n", "path": "doc/ext/notebook_doc.py"}]}
1,599
81
gh_patches_debug_22583
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Avoid quadratic scaling of template integration tests #### Issue description Currently, in ``test_templates.py`` a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility. #### Additional information The issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other. </issue> <code> [start of pennylane/templates/embeddings/basis.py] 1 # Copyright 2018-2020 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 r""" 15 Contains the ``BasisEmbedding`` template. 16 """ 17 # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access 18 import numpy as np 19 20 from pennylane.templates.decorator import template 21 from pennylane.ops import BasisState 22 from pennylane.templates.utils import check_shape, check_wires, get_shape 23 24 25 @template 26 def BasisEmbedding(features, wires): 27 r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits. 28 29 For example, for ``features=np.array([0, 1, 0])``, the quantum system will be 30 prepared in state :math:`|010 \rangle`. 31 32 .. warning:: 33 34 ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features. 35 The ``features`` argument is therefore not differentiable when using the template, and 36 gradients with respect to the argument cannot be computed by PennyLane. 37 38 Args: 39 features (array): binary input array of shape ``(n, )`` 40 wires (Sequence[int] or int): qubit indices that the template acts on 41 42 Raises: 43 ValueError: if inputs do not have the correct format 44 """ 45 46 ############# 47 # Input checks 48 49 wires = check_wires(wires) 50 51 expected_shape = (len(wires),) 52 check_shape( 53 features, 54 expected_shape, 55 msg="'features' must be of shape {}; got {}" "".format(expected_shape, get_shape(features)), 56 ) 57 58 if any([b not in [0, 1] for b in features]): 59 raise ValueError("'basis_state' must only consist of 0s and 1s; got {}".format(features)) 60 61 ############### 62 63 features = np.array(features) 64 BasisState(features, wires=wires) 65 [end of pennylane/templates/embeddings/basis.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py --- a/pennylane/templates/embeddings/basis.py +++ b/pennylane/templates/embeddings/basis.py @@ -15,11 +15,11 @@ Contains the ``BasisEmbedding`` template. """ # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access -import numpy as np +from collections import Iterable from pennylane.templates.decorator import template -from pennylane.ops import BasisState -from pennylane.templates.utils import check_shape, check_wires, get_shape +from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type +import pennylane as qml @template @@ -48,6 +48,10 @@ wires = check_wires(wires) + check_type( + features, [Iterable], msg="'features' must be iterable; got type {}".format(type(features)) + ) + expected_shape = (len(wires),) check_shape( features, @@ -60,5 +64,6 @@ ############### - features = np.array(features) - BasisState(features, wires=wires) + for wire, bit in zip(wires, features): + if bit == 1: + qml.PauliX(wire)
{"golden_diff": "diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py\n--- a/pennylane/templates/embeddings/basis.py\n+++ b/pennylane/templates/embeddings/basis.py\n@@ -15,11 +15,11 @@\n Contains the ``BasisEmbedding`` template.\n \"\"\"\n # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\n-import numpy as np\n+from collections import Iterable\n \n from pennylane.templates.decorator import template\n-from pennylane.ops import BasisState\n-from pennylane.templates.utils import check_shape, check_wires, get_shape\n+from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type\n+import pennylane as qml\n \n \n @template\n@@ -48,6 +48,10 @@\n \n wires = check_wires(wires)\n \n+ check_type(\n+ features, [Iterable], msg=\"'features' must be iterable; got type {}\".format(type(features))\n+ )\n+\n expected_shape = (len(wires),)\n check_shape(\n features,\n@@ -60,5 +64,6 @@\n \n ###############\n \n- features = np.array(features)\n- BasisState(features, wires=wires)\n+ for wire, bit in zip(wires, features):\n+ if bit == 1:\n+ qml.PauliX(wire)\n", "issue": "Avoid quadratic scaling of template integration tests\n#### Issue description\r\n\r\nCurrently, in ``test_templates.py`` a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility.\r\n\r\n#### Additional information\r\n\r\nThe issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the ``BasisEmbedding`` template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport numpy as np\n\nfrom pennylane.templates.decorator import template\nfrom pennylane.ops import BasisState\nfrom pennylane.templates.utils import check_shape, check_wires, get_shape\n\n\n@template\ndef BasisEmbedding(features, wires):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n prepared in state :math:`|010 \\rangle`.\n\n .. 
warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (array): binary input array of shape ``(n, )``\n wires (Sequence[int] or int): qubit indices that the template acts on\n\n Raises:\n ValueError: if inputs do not have the correct format\n \"\"\"\n\n #############\n # Input checks\n\n wires = check_wires(wires)\n\n expected_shape = (len(wires),)\n check_shape(\n features,\n expected_shape,\n msg=\"'features' must be of shape {}; got {}\" \"\".format(expected_shape, get_shape(features)),\n )\n\n if any([b not in [0, 1] for b in features]):\n raise ValueError(\"'basis_state' must only consist of 0s and 1s; got {}\".format(features))\n\n ###############\n\n features = np.array(features)\n BasisState(features, wires=wires)\n", "path": "pennylane/templates/embeddings/basis.py"}]}
1,311
316
gh_patches_debug_12916
rasdani/github-patches
git_diff
kivy__python-for-android-2469
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ctypes.util.find_library 64-bit error I was trying to use `zeroconf` package for my application. It worked perfectly on `armeabi-v7a`, however the program crashed on launch on `arm64-v8a` (both tested on Huawei P30). I have investigated the issues and discovered that the problem is with `ctypes.util.find_library` or, more precisely with the p4a module `andoroid._ctypes_library_finder` in the function `find_library`. The actual problem is that this function finds 32bit libraries regardless of the actual architecture. For example ```python ctypes.util.find_library('c') ``` returns `/system/lib/libc.so` both for 32- and 64-bit architecture. The correct behavior is to return this if Python is compiled for 32-bit and `/system/lib64/libc.so` for 64-bit one. Below is the code of a simple Kivy app that shows the issue: ```python # main.py import sys import ctypes from kivy.app import App from kivy.lang import Builder from kivy.uix.label import Label root = Builder.load_string("""\ #:import sys sys BoxLayout: orientation: 'vertical' Label: id: arch size_hint_y: 1 text_size: self.size halign: 'center' valign: 'middle' text: '64-bit' if sys.maxsize > 2**32 else '32-bit' Label: id: lib size_hint_y: 1 text_size: self.size halign: 'center' valign: 'middle' Label: id: err size_hint_y: 4 text_size: self.size halign: 'left' valign: 'middle' """) class TestCtypesApp(App): def build(self): lib = ctypes.util.find_library('c') root.ids.lib.text = str(lib) try: cdll = ctypes.CDLL(lib) except Exception as err: root.ids.err.text = "{}: {}".format(type(err).__name__, err) else: root.ids.err.text = 'CORRECT' root.ids.err.halign = 'center' return root if __name__ == '__main__': TestCtypesApp().run() ``` ```ini # buildozer.spec [app] title = Test CTypes package.name = testctypes package.domain = org.test source.dir = . source.include_exts = py version = 0.1 requirements = python3,kivy orientation = portrait osx.python_version = 3 osx.kivy_version = 1.9.1 fullscreen = 0 android.api = 30 android.arch = armeabi-v7a ios.kivy_ios_url = https://github.com/kivy/kivy-ios ios.kivy_ios_branch = master ios.ios_deploy_url = https://github.com/phonegap/ios-deploy ios.ios_deploy_branch = 1.7.0 [buildozer] log_level = 2 warn_on_root = 1 [app@arm64] android.arch = arm64-v8a ``` When compiled for `armeabi-v7a` it shows: ``` 32-bit /system/lib/libc.so CORRECT ``` while on `arm64-v8a`: ``` 64-bit /system/lib/libc.so OSError: dlopen failed: library "/system/lib/libc.so" needed or dlopened by "/data/data/org.test.testctypes/files/app/_python_bundle/modules/_ctypes.cpython-38.so" is not accessible for this namespace "classloader-namespace" ``` The expected output is: ``` 64-bit /system/lib64/libc.so CORRECT ``` The source of this problem is in the line 47 of the file [pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py](../blob/develop/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py#L47). For 64-bit Python (build target arch matters, not the system archiecture), the libraries to search should be `["/system/lib64/libc.so", "/system/lib/libc.so"]`. I am also submitting a pull request resolving this issue. 
</issue> <code> [start of pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py] 1 2 import os 3 4 5 def get_activity_lib_dir(activity_name): 6 from jnius import autoclass 7 8 # Get the actual activity instance: 9 activity_class = autoclass(activity_name) 10 if activity_class is None: 11 return None 12 activity = None 13 if hasattr(activity_class, "mActivity") and \ 14 activity_class.mActivity is not None: 15 activity = activity_class.mActivity 16 elif hasattr(activity_class, "mService") and \ 17 activity_class.mService is not None: 18 activity = activity_class.mService 19 if activity is None: 20 return None 21 22 # Extract the native lib dir from the activity instance: 23 package_name = activity.getApplicationContext().getPackageName() 24 manager = activity.getApplicationContext().getPackageManager() 25 manager_class = autoclass("android.content.pm.PackageManager") 26 native_lib_dir = manager.getApplicationInfo( 27 package_name, manager_class.GET_SHARED_LIBRARY_FILES 28 ).nativeLibraryDir 29 return native_lib_dir 30 31 32 def does_libname_match_filename(search_name, file_path): 33 # Filter file names so given search_name="mymodule" we match one of: 34 # mymodule.so (direct name + .so) 35 # libmymodule.so (added lib prefix) 36 # mymodule.arm64.so (added dot-separated middle parts) 37 # mymodule.so.1.3.4 (added dot-separated version tail) 38 # and all above (all possible combinations) 39 import re 40 file_name = os.path.basename(file_path) 41 return (re.match(r"^(lib)?" + re.escape(search_name) + 42 r"\.(.*\.)?so(\.[0-9]+)*$", file_name) is not None) 43 44 45 def find_library(name): 46 # Obtain all places for native libraries: 47 lib_search_dirs = ["/system/lib"] 48 lib_dir_1 = get_activity_lib_dir("org.kivy.android.PythonActivity") 49 if lib_dir_1 is not None: 50 lib_search_dirs.insert(0, lib_dir_1) 51 lib_dir_2 = get_activity_lib_dir("org.kivy.android.PythonService") 52 if lib_dir_2 is not None and lib_dir_2 not in lib_search_dirs: 53 lib_search_dirs.insert(0, lib_dir_2) 54 55 # Now scan the lib dirs: 56 for lib_dir in [ldir for ldir in lib_search_dirs if os.path.exists(ldir)]: 57 filelist = [ 58 f for f in os.listdir(lib_dir) 59 if does_libname_match_filename(name, f) 60 ] 61 if len(filelist) > 0: 62 return os.path.join(lib_dir, filelist[0]) 63 return None 64 [end of pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py --- a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py +++ b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py @@ -1,4 +1,5 @@ +import sys import os @@ -44,7 +45,10 @@ def find_library(name): # Obtain all places for native libraries: - lib_search_dirs = ["/system/lib"] + if sys.maxsize > 2**32: # 64bit-build + lib_search_dirs = ["/system/lib64", "/system/lib"] + else: + lib_search_dirs = ["/system/lib"] lib_dir_1 = get_activity_lib_dir("org.kivy.android.PythonActivity") if lib_dir_1 is not None: lib_search_dirs.insert(0, lib_dir_1)
{"golden_diff": "diff --git a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n--- a/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n+++ b/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py\n@@ -1,4 +1,5 @@\n \n+import sys\n import os\n \n \n@@ -44,7 +45,10 @@\n \n def find_library(name):\n # Obtain all places for native libraries:\n- lib_search_dirs = [\"/system/lib\"]\n+ if sys.maxsize > 2**32: # 64bit-build\n+ lib_search_dirs = [\"/system/lib64\", \"/system/lib\"]\n+ else:\n+ lib_search_dirs = [\"/system/lib\"]\n lib_dir_1 = get_activity_lib_dir(\"org.kivy.android.PythonActivity\")\n if lib_dir_1 is not None:\n lib_search_dirs.insert(0, lib_dir_1)\n", "issue": "ctypes.util.find_library 64-bit error\nI was trying to use `zeroconf` package for my application. It worked perfectly on `armeabi-v7a`, however the program crashed on launch on `arm64-v8a` (both tested on Huawei P30).\r\n\r\nI have investigated the issues and discovered that the problem is with `ctypes.util.find_library` or, more precisely with the p4a module `andoroid._ctypes_library_finder` in the function `find_library`.\r\n\r\nThe actual problem is that this function finds 32bit libraries regardless of the actual architecture. For example\r\n\r\n```python\r\nctypes.util.find_library('c')\r\n```\r\n\r\nreturns `/system/lib/libc.so` both for 32- and 64-bit architecture. The correct behavior is to return this if Python is compiled for 32-bit and `/system/lib64/libc.so` for 64-bit one.\r\n\r\nBelow is the code of a simple Kivy app that shows the issue:\r\n\r\n```python\r\n# main.py\r\nimport sys\r\nimport ctypes\r\n\r\n\r\nfrom kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.label import Label\r\n\r\n\r\nroot = Builder.load_string(\"\"\"\\\r\n#:import sys sys\r\n\r\nBoxLayout:\r\n orientation: 'vertical'\r\n Label:\r\n id: arch\r\n size_hint_y: 1\r\n text_size: self.size\r\n halign: 'center'\r\n valign: 'middle'\r\n text: '64-bit' if sys.maxsize > 2**32 else '32-bit'\r\n Label:\r\n id: lib\r\n size_hint_y: 1\r\n text_size: self.size\r\n halign: 'center'\r\n valign: 'middle'\r\n Label:\r\n id: err\r\n size_hint_y: 4\r\n text_size: self.size\r\n halign: 'left'\r\n valign: 'middle'\r\n\"\"\")\r\n\r\n\r\nclass TestCtypesApp(App):\r\n\r\n def build(self):\r\n lib = ctypes.util.find_library('c')\r\n root.ids.lib.text = str(lib)\r\n try:\r\n cdll = ctypes.CDLL(lib)\r\n except Exception as err:\r\n root.ids.err.text = \"{}: {}\".format(type(err).__name__, err)\r\n else:\r\n root.ids.err.text = 'CORRECT'\r\n root.ids.err.halign = 'center'\r\n return root\r\n\r\n\r\nif __name__ == '__main__':\r\n TestCtypesApp().run()\r\n```\r\n\r\n```ini\r\n# buildozer.spec\r\n[app]\r\ntitle = Test CTypes\r\npackage.name = testctypes\r\npackage.domain = org.test\r\nsource.dir = .\r\nsource.include_exts = py\r\nversion = 0.1\r\nrequirements = python3,kivy\r\norientation = portrait\r\nosx.python_version = 3\r\nosx.kivy_version = 1.9.1\r\nfullscreen = 0\r\nandroid.api = 30\r\nandroid.arch = armeabi-v7a\r\nios.kivy_ios_url = https://github.com/kivy/kivy-ios\r\nios.kivy_ios_branch = master\r\nios.ios_deploy_url = https://github.com/phonegap/ios-deploy\r\nios.ios_deploy_branch = 1.7.0\r\n\r\n[buildozer]\r\nlog_level = 2\r\nwarn_on_root = 1\r\n\r\n[app@arm64]\r\nandroid.arch = arm64-v8a\r\n```\r\n\r\nWhen compiled for `armeabi-v7a` it 
shows:\r\n\r\n```\r\n32-bit\r\n\r\n/system/lib/libc.so\r\n\r\nCORRECT\r\n```\r\n\r\nwhile on `arm64-v8a`:\r\n\r\n```\r\n64-bit\r\n\r\n/system/lib/libc.so\r\n\r\nOSError: dlopen failed: library \"/system/lib/libc.so\" needed or dlopened by\r\n\"/data/data/org.test.testctypes/files/app/_python_bundle/modules/_ctypes.cpython-38.so\"\r\nis not accessible for this namespace \"classloader-namespace\"\r\n```\r\n\r\nThe expected output is:\r\n\r\n```\r\n64-bit\r\n\r\n/system/lib64/libc.so\r\n\r\nCORRECT\r\n```\r\n\r\nThe source of this problem is in the line 47 of the file [pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py](../blob/develop/pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py#L47). For 64-bit Python (build target arch matters, not the system archiecture), the libraries to search should be `[\"/system/lib64/libc.so\", \"/system/lib/libc.so\"]`.\r\n\r\nI am also submitting a pull request resolving this issue.\n", "before_files": [{"content": "\nimport os\n\n\ndef get_activity_lib_dir(activity_name):\n from jnius import autoclass\n\n # Get the actual activity instance:\n activity_class = autoclass(activity_name)\n if activity_class is None:\n return None\n activity = None\n if hasattr(activity_class, \"mActivity\") and \\\n activity_class.mActivity is not None:\n activity = activity_class.mActivity\n elif hasattr(activity_class, \"mService\") and \\\n activity_class.mService is not None:\n activity = activity_class.mService\n if activity is None:\n return None\n\n # Extract the native lib dir from the activity instance:\n package_name = activity.getApplicationContext().getPackageName()\n manager = activity.getApplicationContext().getPackageManager()\n manager_class = autoclass(\"android.content.pm.PackageManager\")\n native_lib_dir = manager.getApplicationInfo(\n package_name, manager_class.GET_SHARED_LIBRARY_FILES\n ).nativeLibraryDir\n return native_lib_dir\n\n\ndef does_libname_match_filename(search_name, file_path):\n # Filter file names so given search_name=\"mymodule\" we match one of:\n # mymodule.so (direct name + .so)\n # libmymodule.so (added lib prefix)\n # mymodule.arm64.so (added dot-separated middle parts)\n # mymodule.so.1.3.4 (added dot-separated version tail)\n # and all above (all possible combinations)\n import re\n file_name = os.path.basename(file_path)\n return (re.match(r\"^(lib)?\" + re.escape(search_name) +\n r\"\\.(.*\\.)?so(\\.[0-9]+)*$\", file_name) is not None)\n\n\ndef find_library(name):\n # Obtain all places for native libraries:\n lib_search_dirs = [\"/system/lib\"]\n lib_dir_1 = get_activity_lib_dir(\"org.kivy.android.PythonActivity\")\n if lib_dir_1 is not None:\n lib_search_dirs.insert(0, lib_dir_1)\n lib_dir_2 = get_activity_lib_dir(\"org.kivy.android.PythonService\")\n if lib_dir_2 is not None and lib_dir_2 not in lib_search_dirs:\n lib_search_dirs.insert(0, lib_dir_2)\n\n # Now scan the lib dirs:\n for lib_dir in [ldir for ldir in lib_search_dirs if os.path.exists(ldir)]:\n filelist = [\n f for f in os.listdir(lib_dir)\n if does_libname_match_filename(name, f)\n ]\n if len(filelist) > 0:\n return os.path.join(lib_dir, filelist[0])\n return None\n", "path": "pythonforandroid/recipes/android/src/android/_ctypes_library_finder.py"}]}
2,206
221
gh_patches_debug_20580
rasdani/github-patches
git_diff
aio-libs-abandoned__aioredis-py-542
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add the "MKSTREAM" option to xgroup_create It's a not-yet-documented option for creating consumer groups before the stream exists: https://github.com/antirez/redis/issues/4824#issuecomment-438263421 Example signature for `StreamCommandsMixin`: ```py def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False): ``` </issue> <code> [start of aioredis/commands/streams.py] 1 from collections import OrderedDict 2 3 from aioredis.util import wait_convert, wait_make_dict, wait_ok 4 5 6 def fields_to_dict(fields, type_=OrderedDict): 7 """Convert a flat list of key/values into an OrderedDict""" 8 fields_iterator = iter(fields) 9 return type_(zip(fields_iterator, fields_iterator)) 10 11 12 def parse_messages(messages): 13 """ Parse messages as returned by Redis into something useful 14 15 Messages returned by XRANGE arrive in the form: 16 17 [ 18 [message_id, [key1, value1, key2, value2, ...]], 19 ... 20 ] 21 22 Here we parse this into: 23 24 [ 25 [message_id, OrderedDict( 26 (key1, value1), 27 (key2, value2), 28 ... 29 )], 30 ... 31 ] 32 33 """ 34 if messages is None: 35 return [] 36 return [(mid, fields_to_dict(values)) for mid, values in messages] 37 38 39 def parse_messages_by_stream(messages_by_stream): 40 """ Parse messages returned by stream 41 42 Messages returned by XREAD arrive in the form: 43 [stream_name, 44 [ 45 [message_id, [key1, value1, key2, value2, ...]], 46 ... 47 ], 48 ... 49 ] 50 51 Here we parse this into (with the help of the above parse_messages() 52 function): 53 54 [ 55 [stream_name, message_id, OrderedDict( 56 (key1, value1), 57 (key2, value2),. 58 ... 59 )], 60 ... 61 ] 62 63 """ 64 if messages_by_stream is None: 65 return [] 66 67 parsed = [] 68 for stream, messages in messages_by_stream: 69 for message_id, fields in parse_messages(messages): 70 parsed.append((stream, message_id, fields)) 71 return parsed 72 73 74 def parse_lists_to_dicts(lists): 75 """ Convert [[a, 1, b, 2], ...] into [{a:1, b: 2}, ...]""" 76 return [fields_to_dict(l, type_=dict) for l in lists] 77 78 79 class StreamCommandsMixin: 80 """Stream commands mixin 81 82 Streams are under development in Redis and 83 not currently released. 
84 """ 85 86 def xadd(self, stream, fields, message_id=b'*', max_len=None, 87 exact_len=False): 88 """Add a message to a stream.""" 89 args = [] 90 if max_len is not None: 91 if exact_len: 92 args.extend((b'MAXLEN', max_len)) 93 else: 94 args.extend((b'MAXLEN', b'~', max_len)) 95 96 args.append(message_id) 97 98 for k, v in fields.items(): 99 args.extend([k, v]) 100 return self.execute(b'XADD', stream, *args) 101 102 def xrange(self, stream, start='-', stop='+', count=None): 103 """Retrieve messages from a stream.""" 104 if count is not None: 105 extra = ['COUNT', count] 106 else: 107 extra = [] 108 fut = self.execute(b'XRANGE', stream, start, stop, *extra) 109 return wait_convert(fut, parse_messages) 110 111 def xrevrange(self, stream, start='+', stop='-', count=None): 112 """Retrieve messages from a stream in reverse order.""" 113 if count is not None: 114 extra = ['COUNT', count] 115 else: 116 extra = [] 117 fut = self.execute(b'XREVRANGE', stream, start, stop, *extra) 118 return wait_convert(fut, parse_messages) 119 120 def xread(self, streams, timeout=0, count=None, latest_ids=None): 121 """Perform a blocking read on the given stream 122 123 :raises ValueError: if the length of streams and latest_ids do 124 not match 125 """ 126 args = self._xread(streams, timeout, count, latest_ids) 127 fut = self.execute(b'XREAD', *args) 128 return wait_convert(fut, parse_messages_by_stream) 129 130 def xread_group(self, group_name, consumer_name, streams, timeout=0, 131 count=None, latest_ids=None): 132 """Perform a blocking read on the given stream as part of a consumer group 133 134 :raises ValueError: if the length of streams and latest_ids do 135 not match 136 """ 137 args = self._xread(streams, timeout, count, latest_ids) 138 fut = self.execute( 139 b'XREADGROUP', b'GROUP', group_name, consumer_name, *args 140 ) 141 return wait_convert(fut, parse_messages_by_stream) 142 143 def xgroup_create(self, stream, group_name, latest_id='$'): 144 """Create a consumer group""" 145 fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id) 146 return wait_ok(fut) 147 148 def xgroup_setid(self, stream, group_name, latest_id='$'): 149 """Set the latest ID for a consumer group""" 150 fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id) 151 return wait_ok(fut) 152 153 def xgroup_destroy(self, stream, group_name): 154 """Delete a consumer group""" 155 fut = self.execute(b'XGROUP', b'DESTROY', stream, group_name) 156 return wait_ok(fut) 157 158 def xgroup_delconsumer(self, stream, group_name, consumer_name): 159 """Delete a specific consumer from a group""" 160 fut = self.execute( 161 b'XGROUP', b'DELCONSUMER', stream, group_name, consumer_name 162 ) 163 return wait_convert(fut, int) 164 165 def xpending(self, stream, group_name, start=None, stop=None, count=None, 166 consumer=None): 167 """Get information on pending messages for a stream 168 169 Returned data will vary depending on the presence (or not) 170 of the start/stop/count parameters. 
For more details see: 171 https://redis.io/commands/xpending 172 173 :raises ValueError: if the start/stop/count parameters are only 174 partially specified 175 """ 176 # Returns: total pel messages, min id, max id, count 177 ssc = [start, stop, count] 178 ssc_count = len([v for v in ssc if v is not None]) 179 if ssc_count != 3 and ssc_count != 0: 180 raise ValueError( 181 'Either specify non or all of the start/stop/count arguments' 182 ) 183 if not any(ssc): 184 ssc = [] 185 186 args = [stream, group_name] + ssc 187 if consumer: 188 args.append(consumer) 189 return self.execute(b'XPENDING', *args) 190 191 def xclaim(self, stream, group_name, consumer_name, min_idle_time, 192 id, *ids): 193 """Claim a message for a given consumer""" 194 fut = self.execute( 195 b'XCLAIM', stream, group_name, consumer_name, min_idle_time, 196 id, *ids 197 ) 198 return wait_convert(fut, parse_messages) 199 200 def xack(self, stream, group_name, id, *ids): 201 """Acknowledge a message for a given consumer group""" 202 return self.execute(b'XACK', stream, group_name, id, *ids) 203 204 def xinfo(self, stream): 205 """Retrieve information about the given stream. 206 207 An alias for ``xinfo_stream()`` 208 """ 209 return self.xinfo_stream(stream) 210 211 def xinfo_consumers(self, stream, group_name): 212 """Retrieve consumers of a consumer group""" 213 fut = self.execute(b'XINFO', b'CONSUMERS', stream, group_name) 214 215 return wait_convert(fut, parse_lists_to_dicts) 216 217 def xinfo_groups(self, stream): 218 """Retrieve the consumer groups for a stream""" 219 fut = self.execute(b'XINFO', b'GROUPS', stream) 220 return wait_convert(fut, parse_lists_to_dicts) 221 222 def xinfo_stream(self, stream): 223 """Retrieve information about the given stream.""" 224 fut = self.execute(b'XINFO', b'STREAM', stream) 225 return wait_make_dict(fut) 226 227 def xinfo_help(self): 228 """Retrieve help regarding the ``XINFO`` sub-commands""" 229 fut = self.execute(b'XINFO', b'HELP') 230 return wait_convert(fut, lambda l: b'\n'.join(l)) 231 232 def _xread(self, streams, timeout=0, count=None, latest_ids=None): 233 """Wraps up common functionality between ``xread()`` 234 and ``xread_group()`` 235 236 You should probably be using ``xread()`` or ``xread_group()`` directly. 237 """ 238 if latest_ids is None: 239 latest_ids = ['$'] * len(streams) 240 if len(streams) != len(latest_ids): 241 raise ValueError( 242 'The streams and latest_ids parameters must be of the ' 243 'same length' 244 ) 245 246 count_args = [b'COUNT', count] if count else [] 247 if timeout is None: 248 block_args = [] 249 else: 250 block_args = [b'BLOCK', timeout] 251 return block_args + count_args + [b'STREAMS'] + streams + latest_ids 252 [end of aioredis/commands/streams.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aioredis/commands/streams.py b/aioredis/commands/streams.py --- a/aioredis/commands/streams.py +++ b/aioredis/commands/streams.py @@ -79,8 +79,7 @@ class StreamCommandsMixin: """Stream commands mixin - Streams are under development in Redis and - not currently released. + Streams are available in Redis since v5.0 """ def xadd(self, stream, fields, message_id=b'*', max_len=None, @@ -140,9 +139,12 @@ ) return wait_convert(fut, parse_messages_by_stream) - def xgroup_create(self, stream, group_name, latest_id='$'): + def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False): """Create a consumer group""" - fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id) + args = [b'CREATE', stream, group_name, latest_id] + if mkstream: + args.append(b'MKSTREAM') + fut = self.execute(b'XGROUP', *args) return wait_ok(fut) def xgroup_setid(self, stream, group_name, latest_id='$'):
{"golden_diff": "diff --git a/aioredis/commands/streams.py b/aioredis/commands/streams.py\n--- a/aioredis/commands/streams.py\n+++ b/aioredis/commands/streams.py\n@@ -79,8 +79,7 @@\n class StreamCommandsMixin:\n \"\"\"Stream commands mixin\n \n- Streams are under development in Redis and\n- not currently released.\n+ Streams are available in Redis since v5.0\n \"\"\"\n \n def xadd(self, stream, fields, message_id=b'*', max_len=None,\n@@ -140,9 +139,12 @@\n )\n return wait_convert(fut, parse_messages_by_stream)\n \n- def xgroup_create(self, stream, group_name, latest_id='$'):\n+ def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False):\n \"\"\"Create a consumer group\"\"\"\n- fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id)\n+ args = [b'CREATE', stream, group_name, latest_id]\n+ if mkstream:\n+ args.append(b'MKSTREAM')\n+ fut = self.execute(b'XGROUP', *args)\n return wait_ok(fut)\n \n def xgroup_setid(self, stream, group_name, latest_id='$'):\n", "issue": "Add the \"MKSTREAM\" option to xgroup_create\nIt's a not-yet-documented option for creating consumer groups before the stream exists: https://github.com/antirez/redis/issues/4824#issuecomment-438263421\r\n\r\nExample signature for `StreamCommandsMixin`:\r\n```py\r\n def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False):\r\n```\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom aioredis.util import wait_convert, wait_make_dict, wait_ok\n\n\ndef fields_to_dict(fields, type_=OrderedDict):\n \"\"\"Convert a flat list of key/values into an OrderedDict\"\"\"\n fields_iterator = iter(fields)\n return type_(zip(fields_iterator, fields_iterator))\n\n\ndef parse_messages(messages):\n \"\"\" Parse messages as returned by Redis into something useful\n\n Messages returned by XRANGE arrive in the form:\n\n [\n [message_id, [key1, value1, key2, value2, ...]],\n ...\n ]\n\n Here we parse this into:\n\n [\n [message_id, OrderedDict(\n (key1, value1),\n (key2, value2),\n ...\n )],\n ...\n ]\n\n \"\"\"\n if messages is None:\n return []\n return [(mid, fields_to_dict(values)) for mid, values in messages]\n\n\ndef parse_messages_by_stream(messages_by_stream):\n \"\"\" Parse messages returned by stream\n\n Messages returned by XREAD arrive in the form:\n [stream_name,\n [\n [message_id, [key1, value1, key2, value2, ...]],\n ...\n ],\n ...\n ]\n\n Here we parse this into (with the help of the above parse_messages()\n function):\n\n [\n [stream_name, message_id, OrderedDict(\n (key1, value1),\n (key2, value2),.\n ...\n )],\n ...\n ]\n\n \"\"\"\n if messages_by_stream is None:\n return []\n\n parsed = []\n for stream, messages in messages_by_stream:\n for message_id, fields in parse_messages(messages):\n parsed.append((stream, message_id, fields))\n return parsed\n\n\ndef parse_lists_to_dicts(lists):\n \"\"\" Convert [[a, 1, b, 2], ...] 
into [{a:1, b: 2}, ...]\"\"\"\n return [fields_to_dict(l, type_=dict) for l in lists]\n\n\nclass StreamCommandsMixin:\n \"\"\"Stream commands mixin\n\n Streams are under development in Redis and\n not currently released.\n \"\"\"\n\n def xadd(self, stream, fields, message_id=b'*', max_len=None,\n exact_len=False):\n \"\"\"Add a message to a stream.\"\"\"\n args = []\n if max_len is not None:\n if exact_len:\n args.extend((b'MAXLEN', max_len))\n else:\n args.extend((b'MAXLEN', b'~', max_len))\n\n args.append(message_id)\n\n for k, v in fields.items():\n args.extend([k, v])\n return self.execute(b'XADD', stream, *args)\n\n def xrange(self, stream, start='-', stop='+', count=None):\n \"\"\"Retrieve messages from a stream.\"\"\"\n if count is not None:\n extra = ['COUNT', count]\n else:\n extra = []\n fut = self.execute(b'XRANGE', stream, start, stop, *extra)\n return wait_convert(fut, parse_messages)\n\n def xrevrange(self, stream, start='+', stop='-', count=None):\n \"\"\"Retrieve messages from a stream in reverse order.\"\"\"\n if count is not None:\n extra = ['COUNT', count]\n else:\n extra = []\n fut = self.execute(b'XREVRANGE', stream, start, stop, *extra)\n return wait_convert(fut, parse_messages)\n\n def xread(self, streams, timeout=0, count=None, latest_ids=None):\n \"\"\"Perform a blocking read on the given stream\n\n :raises ValueError: if the length of streams and latest_ids do\n not match\n \"\"\"\n args = self._xread(streams, timeout, count, latest_ids)\n fut = self.execute(b'XREAD', *args)\n return wait_convert(fut, parse_messages_by_stream)\n\n def xread_group(self, group_name, consumer_name, streams, timeout=0,\n count=None, latest_ids=None):\n \"\"\"Perform a blocking read on the given stream as part of a consumer group\n\n :raises ValueError: if the length of streams and latest_ids do\n not match\n \"\"\"\n args = self._xread(streams, timeout, count, latest_ids)\n fut = self.execute(\n b'XREADGROUP', b'GROUP', group_name, consumer_name, *args\n )\n return wait_convert(fut, parse_messages_by_stream)\n\n def xgroup_create(self, stream, group_name, latest_id='$'):\n \"\"\"Create a consumer group\"\"\"\n fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id)\n return wait_ok(fut)\n\n def xgroup_setid(self, stream, group_name, latest_id='$'):\n \"\"\"Set the latest ID for a consumer group\"\"\"\n fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id)\n return wait_ok(fut)\n\n def xgroup_destroy(self, stream, group_name):\n \"\"\"Delete a consumer group\"\"\"\n fut = self.execute(b'XGROUP', b'DESTROY', stream, group_name)\n return wait_ok(fut)\n\n def xgroup_delconsumer(self, stream, group_name, consumer_name):\n \"\"\"Delete a specific consumer from a group\"\"\"\n fut = self.execute(\n b'XGROUP', b'DELCONSUMER', stream, group_name, consumer_name\n )\n return wait_convert(fut, int)\n\n def xpending(self, stream, group_name, start=None, stop=None, count=None,\n consumer=None):\n \"\"\"Get information on pending messages for a stream\n\n Returned data will vary depending on the presence (or not)\n of the start/stop/count parameters. 
For more details see:\n https://redis.io/commands/xpending\n\n :raises ValueError: if the start/stop/count parameters are only\n partially specified\n \"\"\"\n # Returns: total pel messages, min id, max id, count\n ssc = [start, stop, count]\n ssc_count = len([v for v in ssc if v is not None])\n if ssc_count != 3 and ssc_count != 0:\n raise ValueError(\n 'Either specify non or all of the start/stop/count arguments'\n )\n if not any(ssc):\n ssc = []\n\n args = [stream, group_name] + ssc\n if consumer:\n args.append(consumer)\n return self.execute(b'XPENDING', *args)\n\n def xclaim(self, stream, group_name, consumer_name, min_idle_time,\n id, *ids):\n \"\"\"Claim a message for a given consumer\"\"\"\n fut = self.execute(\n b'XCLAIM', stream, group_name, consumer_name, min_idle_time,\n id, *ids\n )\n return wait_convert(fut, parse_messages)\n\n def xack(self, stream, group_name, id, *ids):\n \"\"\"Acknowledge a message for a given consumer group\"\"\"\n return self.execute(b'XACK', stream, group_name, id, *ids)\n\n def xinfo(self, stream):\n \"\"\"Retrieve information about the given stream.\n\n An alias for ``xinfo_stream()``\n \"\"\"\n return self.xinfo_stream(stream)\n\n def xinfo_consumers(self, stream, group_name):\n \"\"\"Retrieve consumers of a consumer group\"\"\"\n fut = self.execute(b'XINFO', b'CONSUMERS', stream, group_name)\n\n return wait_convert(fut, parse_lists_to_dicts)\n\n def xinfo_groups(self, stream):\n \"\"\"Retrieve the consumer groups for a stream\"\"\"\n fut = self.execute(b'XINFO', b'GROUPS', stream)\n return wait_convert(fut, parse_lists_to_dicts)\n\n def xinfo_stream(self, stream):\n \"\"\"Retrieve information about the given stream.\"\"\"\n fut = self.execute(b'XINFO', b'STREAM', stream)\n return wait_make_dict(fut)\n\n def xinfo_help(self):\n \"\"\"Retrieve help regarding the ``XINFO`` sub-commands\"\"\"\n fut = self.execute(b'XINFO', b'HELP')\n return wait_convert(fut, lambda l: b'\\n'.join(l))\n\n def _xread(self, streams, timeout=0, count=None, latest_ids=None):\n \"\"\"Wraps up common functionality between ``xread()``\n and ``xread_group()``\n\n You should probably be using ``xread()`` or ``xread_group()`` directly.\n \"\"\"\n if latest_ids is None:\n latest_ids = ['$'] * len(streams)\n if len(streams) != len(latest_ids):\n raise ValueError(\n 'The streams and latest_ids parameters must be of the '\n 'same length'\n )\n\n count_args = [b'COUNT', count] if count else []\n if timeout is None:\n block_args = []\n else:\n block_args = [b'BLOCK', timeout]\n return block_args + count_args + [b'STREAMS'] + streams + latest_ids\n", "path": "aioredis/commands/streams.py"}]}
3,242
291
gh_patches_debug_3911
rasdani/github-patches
git_diff
esphome__esphome-docs-919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix documentation typo on Sensirion SPS30 ## Description: Fix documentation typo on Sensirion SPS30 </issue> <code> [start of conf.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # esphome documentation build configuration file, created by 5 # sphinx-quickstart on Mon Jan 22 21:44:07 2018. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 # 20 # import os 21 # import sys 22 # sys.path.insert(0, os.path.abspath('.')) 23 import hashlib 24 import os 25 import subprocess 26 from sphinx import addnodes 27 from sphinx.util.docfields import Field, GroupedField 28 import re 29 import sys 30 31 32 sys.path.append(os.path.abspath('.')) 33 34 # -- General configuration ------------------------------------------------ 35 36 # If your documentation needs a minimal Sphinx version, state it here. 37 # 38 # needs_sphinx = '1.0' 39 40 # Add any Sphinx extension module names here, as strings. They can be 41 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 42 # ones. 43 extensions = [ 44 'github', 45 'seo', 46 'sitemap', 47 ] 48 49 # Add any paths that contain templates here, relative to this directory. 50 templates_path = ['_templates'] 51 52 # The suffix(es) of source filenames. 53 # You can specify multiple suffix as a list of string: 54 # 55 # source_suffix = ['.rst', '.md'] 56 source_suffix = '.rst' 57 58 # The master toctree document. 59 master_doc = 'index' 60 61 # General information about the project. 62 project = 'ESPHome' 63 copyright = '2019, Otto Winter' 64 html_show_copyright = False 65 html_show_sphinx = False 66 author = 'Otto Winter' 67 68 # The version info for the project you're documenting, acts as replacement for 69 # |version| and |release|, also used in various other places throughout the 70 # built documents. 71 # 72 # The short X.Y version. 73 version = '1.16' 74 # The full version, including alpha/beta/rc tags. 75 release = '1.16.0-dev' 76 77 # The language for content autogenerated by Sphinx. Refer to documentation 78 # for a list of supported languages. 79 # 80 # This is also used if you do content translation via gettext catalogs. 81 # Usually you set "language" from the command line for these cases. 82 language = 'en' 83 84 # List of patterns, relative to source directory, that match files and 85 # directories to ignore when looking for source files. 86 # This patterns also effect to html_static_path and html_extra_path 87 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 88 89 # The reST default role (used for this markup: `text`) to use for all documents. 90 # default_role = 'cpp:any' 91 92 # The name of the Pygments (syntax highlighting) style to use. 93 pygments_style = 'xcode' 94 95 highlight_language = 'yaml' 96 97 primary_domain = None 98 99 # If true, `todo` and `todoList` produce output, else they produce nothing. 
100 todo_include_todos = False 101 102 103 # -- Options for HTML output ---------------------------------------------- 104 105 # The theme to use for HTML and HTML Help pages. See the documentation for 106 # a list of builtin themes. 107 # 108 html_theme = 'alabaster' 109 110 # Theme options are theme-specific and customize the look and feel of a theme 111 # further. For a list of options available for each theme, see the 112 # documentation. 113 # 114 html_baseurl = os.getenv('BASE_URL', 'https://esphome.io') 115 with open('_static/custom.css', 'rb') as f: 116 custom_css_hash = hashlib.md5(f.read()).hexdigest()[:8] 117 118 html_theme_options = { 119 # 'logo': 'logo-full.png', 120 'logo_name': False, 121 'show_related': False, 122 'sidebar_collapse': True, 123 'fixed_sidebar': True, 124 'show_powered_by': False, 125 } 126 127 html_context = { 128 'custom_css_hash': custom_css_hash, 129 } 130 131 html_logo = 'images/logo-text.svg' 132 html_copy_source = True 133 html_show_sourcelink = False 134 html_last_updated_fmt = None 135 html_use_smartypants = False 136 html_title = 'ESPHome' 137 138 # Add any paths that contain custom static files (such as style sheets) here, 139 # relative to this directory. They are copied after the builtin static files, 140 # so a file named "default.css" will overwrite the builtin "default.css". 141 html_static_path = ['_static'] 142 143 # Custom sidebar templates, must be a dictionary that maps document names 144 # to template names. 145 # 146 # This is required for the alabaster theme 147 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars 148 html_sidebars = { 149 '**': [ 150 # 'about.html', 151 'searchbox.html', 152 'localtoc.html', 153 ] 154 } 155 156 157 # -- Options for HTMLHelp output ------------------------------------------ 158 159 # Output file base name for HTML help builder. 160 htmlhelp_basename = 'esphomedoc' 161 162 163 # -- Options for LaTeX output --------------------------------------------- 164 165 latex_elements = { 166 # The paper size ('letterpaper' or 'a4paper'). 167 # 168 # 'papersize': 'letterpaper', 169 170 # The font size ('10pt', '11pt' or '12pt'). 171 # 172 # 'pointsize': '10pt', 173 174 # Additional stuff for the LaTeX preamble. 175 # 176 # 'preamble': '', 177 178 # Latex figure (float) alignment 179 # 180 # 'figure_align': 'htbp', 181 } 182 183 # Grouping the document tree into LaTeX files. List of tuples 184 # (source start file, target name, title, 185 # author, documentclass [howto, manual, or own class]). 186 latex_documents = [ 187 (master_doc, 'esphome.tex', 'ESPHome Documentation', 188 'Otto Winter', 'manual'), 189 ] 190 191 latex_engine = 'xelatex' 192 193 194 # -- Options for manual page output --------------------------------------- 195 196 # One entry per manual page. List of tuples 197 # (source start file, name, description, authors, manual section). 198 man_pages = [ 199 (master_doc, 'esphome', 'ESPHome Documentation', 200 [author], 1) 201 ] 202 203 204 # -- Options for Texinfo output ------------------------------------------- 205 206 # Grouping the document tree into Texinfo files. 
List of tuples 207 # (source start file, target name, title, author, 208 # dir menu entry, description, category) 209 texinfo_documents = [ 210 (master_doc, 'esphome', 'ESPHome Documentation', 211 author, 'esphome', 'One line description of project.', 212 'Miscellaneous'), 213 ] 214 linkcheck_ignore = [r'https://github.com/.*', r'https://discord.gg/.*'] 215 [end of conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conf.py b/conf.py --- a/conf.py +++ b/conf.py @@ -70,9 +70,9 @@ # built documents. # # The short X.Y version. -version = '1.16' +version = '1.17' # The full version, including alpha/beta/rc tags. -release = '1.16.0-dev' +release = '1.17.0-dev' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -70,9 +70,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '1.16'\n+version = '1.17'\n # The full version, including alpha/beta/rc tags.\n-release = '1.16.0-dev'\n+release = '1.17.0-dev'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "Fix documentation typo on Sensirion SPS30\n## Description:\r\n\r\nFix documentation typo on Sensirion SPS30\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# esphome documentation build configuration file, created by\n# sphinx-quickstart on Mon Jan 22 21:44:07 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport hashlib\nimport os\nimport subprocess\nfrom sphinx import addnodes\nfrom sphinx.util.docfields import Field, GroupedField\nimport re\nimport sys\n\n\nsys.path.append(os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'github',\n 'seo',\n 'sitemap',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'ESPHome'\ncopyright = '2019, Otto Winter'\nhtml_show_copyright = False\nhtml_show_sphinx = False\nauthor = 'Otto Winter'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.16'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.16.0-dev'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'en'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = 'cpp:any'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'xcode'\n\nhighlight_language = 'yaml'\n\nprimary_domain = None\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_baseurl = os.getenv('BASE_URL', 'https://esphome.io')\nwith open('_static/custom.css', 'rb') as f:\n custom_css_hash = hashlib.md5(f.read()).hexdigest()[:8]\n\nhtml_theme_options = {\n # 'logo': 'logo-full.png',\n 'logo_name': False,\n 'show_related': False,\n 'sidebar_collapse': True,\n 'fixed_sidebar': True,\n 'show_powered_by': False,\n}\n\nhtml_context = {\n 'custom_css_hash': custom_css_hash,\n}\n\nhtml_logo = 'images/logo-text.svg'\nhtml_copy_source = True\nhtml_show_sourcelink = False\nhtml_last_updated_fmt = None\nhtml_use_smartypants = False\nhtml_title = 'ESPHome'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n # 'about.html',\n 'searchbox.html',\n 'localtoc.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'esphomedoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'esphome.tex', 'ESPHome Documentation',\n 'Otto Winter', 'manual'),\n]\n\nlatex_engine = 'xelatex'\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'esphome', 'ESPHome Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'esphome', 'ESPHome Documentation',\n author, 'esphome', 'One line description of project.',\n 'Miscellaneous'),\n]\nlinkcheck_ignore = [r'https://github.com/.*', r'https://discord.gg/.*']\n", "path": "conf.py"}]}
2,623
118
gh_patches_debug_4282
rasdani/github-patches
git_diff
akvo__akvo-rsr-3257
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove scrollbars when they are not necessary The home page shows a scroll bar even when there is nothing to scroll ![image](https://user-images.githubusercontent.com/315678/41148811-6578191a-6af9-11e8-8469-cfdcaee737ad.png) ![image](https://user-images.githubusercontent.com/315678/41148904-a4713a02-6af9-11e8-9873-1cca3c4cc8a3.png) </issue> <code> [start of akvo/rest/views/organisation.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 from django.conf import settings 8 from django.db.models import Q 9 from django.utils import six 10 from rest_framework.decorators import api_view 11 from rest_framework.exceptions import ParseError 12 from rest_framework.parsers import JSONParser 13 from rest_framework.response import Response 14 from rest_framework_xml.parsers import XMLParser 15 from rest_framework_xml.compat import etree 16 17 from akvo.rest.views.utils import int_or_none, get_qs_elements_for_page 18 from akvo.rsr.filters import location_choices, get_m49_filter 19 from akvo.rsr.models import Project, Organisation, Country 20 from akvo.rsr.views.utils import apply_keywords, org_projects 21 from ..serializers import OrganisationSerializer, OrganisationDirectorySerializer 22 from ..viewsets import BaseRSRViewSet 23 24 25 class AkvoOrganisationParser(XMLParser): 26 def parse(self, stream, media_type=None, parser_context=None): 27 assert etree, 'XMLParser requires defusedxml to be installed' 28 29 parser_context = parser_context or {} 30 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET) 31 parser = etree.DefusedXMLParser(encoding=encoding) 32 try: 33 tree = etree.parse(stream, parser=parser, forbid_dtd=True) 34 except (etree.ParseError, ValueError) as exc: 35 raise ParseError('XML parse error - %s' % six.text_type(exc)) 36 return self.organisation_data_from_etree(tree.getroot()) 37 38 def organisation_data_from_etree(self, tree): 39 def find_text(tree, str): 40 element = tree.find(str) 41 if element is None: 42 return '' 43 return element.text.strip() if element.text else "" 44 45 def location_data(location_tree): 46 if location_tree is None: 47 return [] 48 iso_code = find_text(location_tree, 'iso_code').lower() 49 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code)) 50 country = country.id 51 latitude = find_text(location_tree, 'latitude') or 0 52 longitude = find_text(location_tree, 'longitude') or 0 53 primary = True 54 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)] 55 56 long_name = find_text(tree, 'name') 57 name = long_name[:25] 58 description = find_text(tree, 'description') 59 url = find_text(tree, 'url') 60 iati_type = find_text(tree, 'iati_organisation_type') 61 new_organisation_type = int(iati_type) if iati_type else 22 62 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type) 63 locations = location_data(tree.find('location/object')) 64 return dict( 65 name=name, long_name=long_name, description=description, url=url, 66 organisation_type=organisation_type, new_organisation_type=new_organisation_type, 67 locations=locations 68 ) 69 70 71 class OrganisationViewSet(BaseRSRViewSet): 72 """ 73 API endpoint that 
allows organisations to be viewed or edited. 74 """ 75 queryset = Organisation.objects.all() 76 serializer_class = OrganisationSerializer 77 parser_classes = (AkvoOrganisationParser, JSONParser,) 78 79 80 @api_view(['GET']) 81 def organisation_directory(request): 82 """REST view for the update directory.""" 83 84 page = request.rsr_page 85 all_organisations = Organisation.objects.all() if not page else _page_organisations(page) 86 87 # Filter updates based on query parameters 88 filter_, text_filter = _create_filters_query(request) 89 organisations = ( 90 all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations 91 ) 92 organisations_text_filtered = ( 93 organisations.filter(text_filter) if text_filter is not None else organisations 94 ) 95 if organisations_text_filtered.exists(): 96 organisations = organisations_text_filtered 97 98 # Get the relevant data for typeaheads based on filtered organisations (minus 99 # text filtering, if no organisations were found) 100 locations = [ 101 {'id': choice[0], 'name': choice[1]} 102 for choice in location_choices(organisations) 103 ] 104 105 display_organisations = get_qs_elements_for_page(organisations_text_filtered, request) 106 107 # Get related objects of page at once 108 response = { 109 'project_count': all_organisations.count(), 110 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data, 111 'location': locations, 112 } 113 return Response(response) 114 115 116 def _public_projects(): 117 """Return all public projects.""" 118 return Project.objects.public().published().select_related('partners') 119 120 121 def _page_organisations(page): 122 """Dig out the list or organisations to use.""" 123 projects = org_projects(page.organisation) if page.partner_projects else _public_projects() 124 keyword_projects = apply_keywords(page, projects) 125 return keyword_projects.all_partners() 126 127 128 def _create_filters_query(request): 129 """Returns a Q object expression based on query parameters.""" 130 location_param = int_or_none(request.GET.get('location')) 131 title_or_subtitle_param = request.GET.get('title_or_subtitle') 132 133 location_filter = ( 134 get_m49_filter(location_param, use_recipient_country=False) if location_param else None 135 ) 136 title_filter = ( 137 Q(name__icontains=title_or_subtitle_param) | 138 Q(long_name__icontains=title_or_subtitle_param) 139 ) if title_or_subtitle_param else None 140 all_filters = [ 141 location_filter, 142 ] 143 filters = filter(None, all_filters) 144 return reduce(lambda x, y: x & y, filters) if filters else None, title_filter 145 [end of akvo/rest/views/organisation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py --- a/akvo/rest/views/organisation.py +++ b/akvo/rest/views/organisation.py @@ -106,7 +106,7 @@ # Get related objects of page at once response = { - 'project_count': all_organisations.count(), + 'project_count': organisations_text_filtered.count(), 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data, 'location': locations, }
{"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -106,7 +106,7 @@\n \n # Get related objects of page at once\n response = {\n- 'project_count': all_organisations.count(),\n+ 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n }\n", "issue": "Remove scrollbars when they are not necessary\nThe home page shows a scroll bar even when there is nothing to scroll\r\n\r\n![image](https://user-images.githubusercontent.com/315678/41148811-6578191a-6af9-11e8-8469-cfdcaee737ad.png)\r\n\r\n![image](https://user-images.githubusercontent.com/315678/41148904-a4713a02-6af9-11e8-9873-1cca3c4cc8a3.png)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = 
location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': all_organisations.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n }\n return Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}]}
2,245
121
gh_patches_debug_11080
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-6581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installation on Armv6l creates a Linux-32bit-unknown bootloader. ## Description of the issue When installing on a RPi 1b you get an incorrectly named bootloader created. The directory is called Linux-32bit-unknown and not Linux-32bit-arm. ### Context information (for bug reports) * Output of `pyinstaller --version`: ```4.8``` * Version of Python: <!-- e.g. 3.7 --> 3.7 * Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store -->pip3 install pyinstaller * Did you also try this on another platform? Does it work there? yes. * [x] start with clean installation * [x] use the latest development version </issue> <code> [start of PyInstaller/_shared_with_waf.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2021, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 """ 12 Code to be shared by PyInstaller and the bootloader/wscript file. 13 14 This code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed 15 in here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as 16 many compiler docker images still have only Python 2 installed. 17 """ 18 19 import platform 20 import re 21 22 23 def _pyi_machine(machine, system): 24 # type: (str, str) -> str 25 """ 26 Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name. 27 28 Args: 29 machine: 30 The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a 31 C compiler. 32 system: 33 The output of ``platform.system()`` on the target machine. 34 Returns: 35 Either a string tag or, on platforms that don't need an architecture tag, ``None``. 36 37 Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost 38 impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based 39 only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose 40 differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing. 41 """ 42 # See the corresponding tests in tests/unit/test_compat.py for examples. 43 44 if platform.machine() == "sw_64" or platform.machine() == "loongarch64": 45 # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine. 46 return platform.machine() 47 48 if system != "Linux": 49 # No architecture specifier for anything par Linux. 50 # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless 51 # and painful to give Windows an architecture specifier. 52 # - macOS is on two 64 bit architectures, but they are merged into one "universal2" bootloader. 
53 # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our 54 # BSD users are on x86_64. This may change in the distant future. 55 return 56 57 if machine.startswith(("arm", "aarch")): 58 # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64. 59 return "arm" 60 if machine in ("x86_64", "x64", "x86"): 61 return "intel" 62 if re.fullmatch("i[1-6]86", machine): 63 return "intel" 64 if machine.startswith(("ppc", "powerpc")): 65 # PowerPC comes in 64 vs 32 bit and little vs big endian variants. 66 return "ppc" 67 if machine in ("mips64", "mips"): 68 return "mips" 69 # Machines with no known aliases :) 70 if machine in ("s390x",): 71 return machine 72 73 # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to 74 # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently 75 # unlikely to ever happen. 76 return "unknown" 77 [end of PyInstaller/_shared_with_waf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py --- a/PyInstaller/_shared_with_waf.py +++ b/PyInstaller/_shared_with_waf.py @@ -57,6 +57,10 @@ if machine.startswith(("arm", "aarch")): # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64. return "arm" + if machine in ("thumb"): + # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns "arm" + # regardless of the instruction set. + return "arm" if machine in ("x86_64", "x64", "x86"): return "intel" if re.fullmatch("i[1-6]86", machine):
{"golden_diff": "diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py\n--- a/PyInstaller/_shared_with_waf.py\n+++ b/PyInstaller/_shared_with_waf.py\n@@ -57,6 +57,10 @@\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n+ if machine in (\"thumb\"):\n+ # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns \"arm\"\n+ # regardless of the instruction set.\n+ return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n", "issue": "Installation on Armv6l creates a Linux-32bit-unknown bootloader.\n## Description of the issue\r\nWhen installing on a RPi 1b you get an incorrectly named bootloader created. The directory is called Linux-32bit-unknown and not Linux-32bit-arm.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.8```\r\n* Version of Python: <!-- e.g. 3.7 --> 3.7\r\n* Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store -->pip3 install pyinstaller\r\n* Did you also try this on another platform? Does it work there? yes.\r\n\r\n\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nCode to be shared by PyInstaller and the bootloader/wscript file.\n\nThis code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed\nin here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as\nmany compiler docker images still have only Python 2 installed.\n\"\"\"\n\nimport platform\nimport re\n\n\ndef _pyi_machine(machine, system):\n # type: (str, str) -> str\n \"\"\"\n Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.\n\n Args:\n machine:\n The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a\n C compiler.\n system:\n The output of ``platform.system()`` on the target machine.\n Returns:\n Either a string tag or, on platforms that don't need an architecture tag, ``None``.\n\n Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost\n impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based\n only on the machine name alias or shorthand reported by the C compiler at the build time. 
Rather, use a loose\n differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.\n \"\"\"\n # See the corresponding tests in tests/unit/test_compat.py for examples.\n\n if platform.machine() == \"sw_64\" or platform.machine() == \"loongarch64\":\n # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.\n return platform.machine()\n\n if system != \"Linux\":\n # No architecture specifier for anything par Linux.\n # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless\n # and painful to give Windows an architecture specifier.\n # - macOS is on two 64 bit architectures, but they are merged into one \"universal2\" bootloader.\n # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our\n # BSD users are on x86_64. This may change in the distant future.\n return\n\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n return \"intel\"\n if machine.startswith((\"ppc\", \"powerpc\")):\n # PowerPC comes in 64 vs 32 bit and little vs big endian variants.\n return \"ppc\"\n if machine in (\"mips64\", \"mips\"):\n return \"mips\"\n # Machines with no known aliases :)\n if machine in (\"s390x\",):\n return machine\n\n # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to\n # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently\n # unlikely to ever happen.\n return \"unknown\"\n", "path": "PyInstaller/_shared_with_waf.py"}]}
1,737
210
gh_patches_debug_11874
rasdani/github-patches
git_diff
kubeflow__pipelines-2213
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Component] GCP dataproc create_cluster component cannot correctly specify image_version. Issue: When specifying not-null image version, create_cluster component raises: `<HttpError 400 when requesting https://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4 returned "Invalid JSON payload received. Unknown name "softwareConfig" at 'cluster': Cannot find field.">` Initial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster). Will work out a fix shortly. </issue> <code> [start of components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import json 15 16 from fire import decorators 17 from ._client import DataprocClient 18 from kfp_component.core import KfpExecutionContext, display 19 from .. import common as gcp_common 20 21 @decorators.SetParseFns(image_version=str) 22 def create_cluster(project_id, region, name=None, name_prefix=None, 23 initialization_actions=None, config_bucket=None, image_version=None, 24 cluster=None, wait_interval=30): 25 """Creates a DataProc cluster under a project. 26 27 Args: 28 project_id (str): Required. The ID of the Google Cloud Platform project 29 that the cluster belongs to. 30 region (str): Required. The Cloud Dataproc region in which to handle the 31 request. 32 name (str): Optional. The cluster name. Cluster names within a project 33 must be unique. Names of deleted clusters can be reused. 34 name_prefix (str): Optional. The prefix of the cluster name. 35 initialization_actions (list): Optional. List of GCS URIs of executables 36 to execute on each node after config is completed. By default, 37 executables are run on master and all worker nodes. 38 config_bucket (str): Optional. A Google Cloud Storage bucket used to 39 stage job dependencies, config files, and job driver console output. 40 image_version (str): Optional. The version of software inside the cluster. 41 cluster (dict): Optional. The full cluster config. See [full details]( 42 https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster) 43 wait_interval (int): The wait seconds between polling the operation. 44 Defaults to 30s. 45 46 Returns: 47 The created cluster object. 48 49 Output Files: 50 $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the 51 created cluster. 
52 """ 53 if not cluster: 54 cluster = {} 55 cluster['projectId'] = project_id 56 if 'config' not in cluster: 57 cluster['config'] = {} 58 if name: 59 cluster['clusterName'] = name 60 if initialization_actions: 61 cluster['config']['initializationActions'] = list( 62 map(lambda file: { 63 'executableFile': file 64 }, initialization_actions) 65 ) 66 if config_bucket: 67 cluster['config']['configBucket'] = config_bucket 68 if image_version: 69 if 'softwareConfig' not in cluster: 70 cluster['softwareConfig'] = {} 71 cluster['softwareConfig']['imageVersion'] = image_version 72 73 return _create_cluster_internal(project_id, region, cluster, name_prefix, 74 wait_interval) 75 76 def _create_cluster_internal(project_id, region, cluster, name_prefix, 77 wait_interval): 78 client = DataprocClient() 79 operation_name = None 80 with KfpExecutionContext( 81 on_cancel=lambda: client.cancel_operation(operation_name)) as ctx: 82 _set_cluster_name(cluster, ctx.context_id(), name_prefix) 83 _dump_metadata(cluster, region) 84 operation = client.create_cluster(project_id, region, cluster, 85 request_id=ctx.context_id()) 86 operation_name = operation.get('name') 87 operation = client.wait_for_operation_done(operation_name, 88 wait_interval) 89 return _dump_cluster(operation.get('response')) 90 91 def _set_cluster_name(cluster, context_id, name_prefix): 92 if 'clusterName' in cluster: 93 return 94 if not name_prefix: 95 name_prefix = 'cluster' 96 cluster['clusterName'] = name_prefix + '-' + context_id 97 98 def _dump_metadata(cluster, region): 99 display.display(display.Link( 100 'https://console.cloud.google.com/dataproc/clusters/{}?project={}&region={}'.format( 101 cluster.get('clusterName'), cluster.get('projectId'), region), 102 'Cluster Details' 103 )) 104 105 def _dump_cluster(cluster): 106 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json', 107 json.dumps(cluster)) 108 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt', 109 cluster.get('clusterName')) 110 return cluster 111 [end of components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py --- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py +++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py @@ -66,9 +66,9 @@ if config_bucket: cluster['config']['configBucket'] = config_bucket if image_version: - if 'softwareConfig' not in cluster: - cluster['softwareConfig'] = {} - cluster['softwareConfig']['imageVersion'] = image_version + if 'softwareConfig' not in cluster['config']: + cluster['config']['softwareConfig'] = {} + cluster['config']['softwareConfig']['imageVersion'] = image_version return _create_cluster_internal(project_id, region, cluster, name_prefix, wait_interval)
{"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n@@ -66,9 +66,9 @@\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n- if 'softwareConfig' not in cluster:\n- cluster['softwareConfig'] = {}\n- cluster['softwareConfig']['imageVersion'] = image_version\n+ if 'softwareConfig' not in cluster['config']:\n+ cluster['config']['softwareConfig'] = {}\n+ cluster['config']['softwareConfig']['imageVersion'] = image_version\n \n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n", "issue": "[Component] GCP dataproc create_cluster component cannot correctly specify image_version.\nIssue:\r\nWhen specifying not-null image version, create_cluster component raises:\r\n`<HttpError 400 when requesting \r\nhttps://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4\r\n returned \"Invalid JSON payload received. Unknown name \"softwareConfig\" at 'cluster': Cannot find field.\">`\r\n\r\nInitial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster).\r\n\r\nWill work out a fix shortly.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\n\nfrom fire import decorators\nfrom ._client import DataprocClient\nfrom kfp_component.core import KfpExecutionContext, display\nfrom .. import common as gcp_common\n\[email protected](image_version=str)\ndef create_cluster(project_id, region, name=None, name_prefix=None,\n initialization_actions=None, config_bucket=None, image_version=None,\n cluster=None, wait_interval=30):\n \"\"\"Creates a DataProc cluster under a project.\n\n Args:\n project_id (str): Required. The ID of the Google Cloud Platform project \n that the cluster belongs to.\n region (str): Required. The Cloud Dataproc region in which to handle the \n request.\n name (str): Optional. The cluster name. Cluster names within a project\n must be unique. Names of deleted clusters can be reused.\n name_prefix (str): Optional. The prefix of the cluster name.\n initialization_actions (list): Optional. List of GCS URIs of executables \n to execute on each node after config is completed. 
By default,\n executables are run on master and all worker nodes. \n config_bucket (str): Optional. A Google Cloud Storage bucket used to \n stage job dependencies, config files, and job driver console output.\n image_version (str): Optional. The version of software inside the cluster.\n cluster (dict): Optional. The full cluster config. See [full details](\n https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)\n wait_interval (int): The wait seconds between polling the operation. \n Defaults to 30s.\n\n Returns:\n The created cluster object.\n\n Output Files:\n $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the \n created cluster.\n \"\"\"\n if not cluster:\n cluster = {}\n cluster['projectId'] = project_id\n if 'config' not in cluster:\n cluster['config'] = {}\n if name:\n cluster['clusterName'] = name\n if initialization_actions:\n cluster['config']['initializationActions'] = list(\n map(lambda file: {\n 'executableFile': file\n }, initialization_actions)\n )\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n if 'softwareConfig' not in cluster:\n cluster['softwareConfig'] = {}\n cluster['softwareConfig']['imageVersion'] = image_version\n\n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n\ndef _create_cluster_internal(project_id, region, cluster, name_prefix, \n wait_interval):\n client = DataprocClient()\n operation_name = None\n with KfpExecutionContext(\n on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:\n _set_cluster_name(cluster, ctx.context_id(), name_prefix)\n _dump_metadata(cluster, region)\n operation = client.create_cluster(project_id, region, cluster, \n request_id=ctx.context_id())\n operation_name = operation.get('name')\n operation = client.wait_for_operation_done(operation_name, \n wait_interval)\n return _dump_cluster(operation.get('response'))\n\ndef _set_cluster_name(cluster, context_id, name_prefix):\n if 'clusterName' in cluster:\n return\n if not name_prefix:\n name_prefix = 'cluster'\n cluster['clusterName'] = name_prefix + '-' + context_id\n\ndef _dump_metadata(cluster, region):\n display.display(display.Link(\n 'https://console.cloud.google.com/dataproc/clusters/{}?project={}&region={}'.format(\n cluster.get('clusterName'), cluster.get('projectId'), region),\n 'Cluster Details'\n ))\n\ndef _dump_cluster(cluster):\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json', \n json.dumps(cluster))\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',\n cluster.get('clusterName'))\n return cluster\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py"}]}
2,032
212
gh_patches_debug_3201
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-337
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] Remove example.py in examples directory The code in the example.py file currently reads some data frame from a file called 'dirty_data.xls'. We can change this to include a concrete example. </issue> <code> [start of examples/example.py] 1 import pandas as pd 2 3 import janitor as jn 4 5 df = ( 6 pd.read_excel("dirty_data.xlsx") 7 .clean_names() 8 .remove_empty() 9 .rename_column("%_allocated", "percent_allocated") 10 .rename_column("full_time_", "full_time") 11 .coalesce(["certification", "certification_1"], "certification") 12 .encode_categorical(["subject", "employee_status", "full_time"]) 13 .convert_excel_date("hire_date") 14 ) 15 16 print(df) 17 print(df.original_names) 18 [end of examples/example.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/example.py b/examples/example.py deleted file mode 100644 --- a/examples/example.py +++ /dev/null @@ -1,17 +0,0 @@ -import pandas as pd - -import janitor as jn - -df = ( - pd.read_excel("dirty_data.xlsx") - .clean_names() - .remove_empty() - .rename_column("%_allocated", "percent_allocated") - .rename_column("full_time_", "full_time") - .coalesce(["certification", "certification_1"], "certification") - .encode_categorical(["subject", "employee_status", "full_time"]) - .convert_excel_date("hire_date") -) - -print(df) -print(df.original_names)
{"golden_diff": "diff --git a/examples/example.py b/examples/example.py\ndeleted file mode 100644\n--- a/examples/example.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-import pandas as pd\n-\n-import janitor as jn\n-\n-df = (\n- pd.read_excel(\"dirty_data.xlsx\")\n- .clean_names()\n- .remove_empty()\n- .rename_column(\"%_allocated\", \"percent_allocated\")\n- .rename_column(\"full_time_\", \"full_time\")\n- .coalesce([\"certification\", \"certification_1\"], \"certification\")\n- .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n- .convert_excel_date(\"hire_date\")\n-)\n-\n-print(df)\n-print(df.original_names)\n", "issue": "[DOC] Remove example.py in examples directory\nThe code in the example.py file currently reads some data frame from a file called 'dirty_data.xls'.\r\nWe can change this to include a concrete example.\n", "before_files": [{"content": "import pandas as pd\n\nimport janitor as jn\n\ndf = (\n pd.read_excel(\"dirty_data.xlsx\")\n .clean_names()\n .remove_empty()\n .rename_column(\"%_allocated\", \"percent_allocated\")\n .rename_column(\"full_time_\", \"full_time\")\n .coalesce([\"certification\", \"certification_1\"], \"certification\")\n .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n .convert_excel_date(\"hire_date\")\n)\n\nprint(df)\nprint(df.original_names)\n", "path": "examples/example.py"}]}
712
167
gh_patches_debug_24396
rasdani/github-patches
git_diff
graspologic-org__graspologic-438
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> remove ari return value from AutoGMM.fit_predict Doesn't match with API well, should just get rid of this </issue> <code> [start of graspy/cluster/base.py] 1 # Copyright 2019 NeuroData (http://neurodata.io) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from abc import ABC, abstractmethod 16 17 from sklearn.base import BaseEstimator, ClusterMixin 18 from sklearn.metrics import adjusted_rand_score 19 from sklearn.utils.validation import check_is_fitted 20 21 22 class BaseCluster(ABC, BaseEstimator, ClusterMixin): 23 """ 24 Base clustering class. 25 """ 26 27 @abstractmethod 28 def fit(self, X, y=None): 29 """ 30 Compute clusters based on given method. 31 32 Parameters 33 ---------- 34 X : array-like, shape (n_samples, n_features) 35 List of n_features-dimensional data points. Each row 36 corresponds to a single data point. 37 38 y : array-like, shape (n_samples,), optional (default=None) 39 List of labels for X if available. Used to compute 40 ARI scores. 41 42 Returns 43 ------- 44 self 45 """ 46 47 def predict(self, X, y=None): # pragma: no cover 48 """ 49 Predict clusters based on best model. 50 51 Parameters 52 ---------- 53 X : array-like, shape (n_samples, n_features) 54 List of n_features-dimensional data points. Each row 55 corresponds to a single data point. 56 y : array-like, shape (n_samples, ), optional (default=None) 57 List of labels for X if available. Used to compute 58 ARI scores. 59 60 Returns 61 ------- 62 labels : array, shape (n_samples,) 63 Component labels. 64 65 ari : float 66 Adjusted Rand index. Only returned if y is given. 67 """ 68 # Check if fit is already called 69 check_is_fitted(self, ["model_"], all_or_any=all) 70 labels = self.model_.predict(X) 71 72 if y is None: 73 return labels 74 else: 75 ari = adjusted_rand_score(y, labels) 76 return labels, ari 77 78 def fit_predict(self, X, y=None): # pragma: no cover 79 """ 80 Fit the models and predict clusters based on best model. 81 82 Parameters 83 ---------- 84 X : array-like, shape (n_samples, n_features) 85 List of n_features-dimensional data points. Each row 86 corresponds to a single data point. 87 88 y : array-like, shape (n_samples,), optional (default=None) 89 List of labels for X if available. Used to compute 90 ARI scores. 91 92 Returns 93 ------- 94 labels : array, shape (n_samples,) 95 Component labels. 96 97 ari : float 98 Adjusted Rand index. Only returned if y is given. 99 """ 100 self.fit(X, y) 101 102 if y is None: 103 labels = self.predict(X, y) 104 return labels 105 else: 106 labels, ari = self.predict(X, y) 107 return labels, ari 108 [end of graspy/cluster/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py --- a/graspy/cluster/base.py +++ b/graspy/cluster/base.py @@ -61,19 +61,12 @@ ------- labels : array, shape (n_samples,) Component labels. - - ari : float - Adjusted Rand index. Only returned if y is given. """ # Check if fit is already called check_is_fitted(self, ["model_"], all_or_any=all) labels = self.model_.predict(X) - if y is None: - return labels - else: - ari = adjusted_rand_score(y, labels) - return labels, ari + return labels def fit_predict(self, X, y=None): # pragma: no cover """ @@ -93,15 +86,8 @@ ------- labels : array, shape (n_samples,) Component labels. - - ari : float - Adjusted Rand index. Only returned if y is given. """ self.fit(X, y) - if y is None: - labels = self.predict(X, y) - return labels - else: - labels, ari = self.predict(X, y) - return labels, ari + labels = self.predict(X, y) + return labels
{"golden_diff": "diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py\n--- a/graspy/cluster/base.py\n+++ b/graspy/cluster/base.py\n@@ -61,19 +61,12 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n \n- if y is None:\n- return labels\n- else:\n- ari = adjusted_rand_score(y, labels)\n- return labels, ari\n+ return labels\n \n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n@@ -93,15 +86,8 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n \n- if y is None:\n- labels = self.predict(X, y)\n- return labels\n- else:\n- labels, ari = self.predict(X, y)\n- return labels, ari\n+ labels = self.predict(X, y)\n+ return labels\n", "issue": "remove ari return value from AutoGMM.fit_predict\nDoesn't match with API well, should just get rid of this\n", "before_files": [{"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.validation import check_is_fitted\n\n\nclass BaseCluster(ABC, BaseEstimator, ClusterMixin):\n \"\"\"\n Base clustering class.\n \"\"\"\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"\n Compute clusters based on given method.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n def predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n y : array-like, shape (n_samples, ), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n\n if y is None:\n return labels\n else:\n ari = adjusted_rand_score(y, labels)\n return labels, ari\n\n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Fit the models and predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. 
Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n\n if y is None:\n labels = self.predict(X, y)\n return labels\n else:\n labels, ari = self.predict(X, y)\n return labels, ari\n", "path": "graspy/cluster/base.py"}]}
1,501
313
gh_patches_debug_18712
rasdani/github-patches
git_diff
TileDB-Inc__TileDB-Py-1936
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show enumerated value-types in enum-printer Repro: ``` >>> A=tiledb.open('/var/p/obs') >>> A.enum('louvain') Enumeration(name='louvain', cell_val_num=4294967295, ordered=False, values=['CD4 T cells', 'CD14+ Monocytes', 'B cells', 'CD8 T cells', 'NK cells', 'FCGR3A+ Monocytes', 'Dendritic cells', 'Megakaryocytes']) >>> A.enum('louvain').dtype dtype('<U1') >>> A.enum('louvain').dtype.name 'str32' ``` Request: `A.enum('louvain')` should reveal the value dtype. --- sc-43628 </issue> <code> [start of tiledb/enumeration.py] 1 from __future__ import annotations 2 3 import io 4 from typing import Any, Optional, Sequence 5 6 import numpy as np 7 from numpy.typing import NDArray 8 9 import tiledb.cc as lt 10 11 from .ctx import Ctx, CtxMixin 12 from .datatypes import DataType 13 14 15 class Enumeration(CtxMixin, lt.Enumeration): 16 """ 17 Represents a TileDB Enumeration. 18 """ 19 20 def __init__( 21 self, 22 name: str, 23 ordered: bool, 24 values: Optional[Sequence[Any]] = None, 25 dtype: Optional[np.dtype] = None, 26 ctx: Optional[Ctx] = None, 27 ): 28 """Class representing the TileDB Enumeration. 29 30 :param name: The name of the to-be created Enumeration 31 :type name: str 32 :param ordered: Whether or not to consider this enumeration ordered 33 :type ordered: bool 34 :param values: A Numpy array of values for this enumeration 35 :type values: np.array 36 :param ctx: A TileDB context 37 :type ctx: tiledb.Ctx 38 """ 39 if values is None or len(values) == 0: 40 if dtype is None: 41 raise ValueError("dtype must be provied for empty enumeration") 42 super().__init__(ctx, name, np.dtype(dtype), ordered) 43 44 values = np.array(values) 45 if np.dtype(values.dtype).kind in "US": 46 dtype = ( 47 lt.DataType.STRING_UTF8 48 if values.dtype.kind == "U" 49 else lt.DataType.STRING_ASCII 50 ) 51 super().__init__(ctx, name, values, ordered, dtype) 52 else: 53 super().__init__(ctx, name, ordered, values, np.array([])) 54 55 @property 56 def name(self) -> str: 57 """The enumeration label string. 58 59 :rtype: str 60 """ 61 return super().name 62 63 @property 64 def dtype(self) -> np.dtype: 65 """Numpy dtype representation of the enumeration type. 66 67 :rtype: numpy.dtype 68 """ 69 return DataType.from_tiledb(super().type).np_dtype 70 71 @property 72 def cell_val_num(self) -> int: 73 """The enumeration's cell value number. 74 75 :rtype: int 76 """ 77 return super().cell_val_num 78 79 @property 80 def ordered(self) -> bool: 81 """True if the enumeration is ordered. 82 83 :rtype: bool 84 """ 85 return super().ordered 86 87 def values(self) -> NDArray: 88 """The values of the enumeration. 89 90 :rtype: NDArray 91 """ 92 if self.dtype.kind == "U": 93 return np.array(super().str_values(), dtype=np.str_) 94 elif self.dtype.kind == "S": 95 return np.array(super().str_values(), dtype=np.bytes_) 96 else: 97 return np.array(super().values(), dtype=self.dtype) 98 99 def extend(self, values: Sequence[Any]) -> Enumeration: 100 """Add additional values to the enumeration. 
101 102 :rtype: Enumeration 103 """ 104 values = np.array(values) 105 if self.dtype.kind in "US" and values.dtype.kind not in "US": 106 raise lt.TileDBError("Passed in enumeration must be string type") 107 108 if np.issubdtype(self.dtype, np.integer) and not np.issubdtype( 109 values.dtype, np.integer 110 ): 111 raise lt.TileDBError("Passed in enumeration must be integer type") 112 113 return Enumeration.from_pybind11(self._ctx, super().extend(values)) 114 115 def __eq__(self, other): 116 if not isinstance(other, Enumeration): 117 return False 118 119 return all( 120 [ 121 self.name == other.name, 122 self.dtype == other.dtype, 123 self.cell_val_num == other.cell_val_num, 124 self.ordered == other.ordered, 125 np.array_equal(self.values(), other.values()), 126 ] 127 ) 128 129 def __repr__(self): 130 # use safe repr if pybind11 constructor failed 131 if self._ctx is None: 132 return object.__repr__(self) 133 134 return f"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})" 135 136 def _repr_html_(self): 137 output = io.StringIO() 138 139 output.write("<table>") 140 output.write("<tr>") 141 output.write("<th>Name</th>") 142 output.write("<th>Data Type</th>") 143 output.write("<th>Ordered</th>") 144 output.write("</tr>") 145 output.write(f"{self._repr_html_row_only_()}") 146 output.write("</table>") 147 148 return output.getvalue() 149 150 def _repr_html_row_only_(self): 151 output = io.StringIO() 152 153 output.write("<tr>") 154 output.write(f"<td>{self.name}</td>") 155 output.write(f"<td>{self.dtype}</td>") 156 output.write(f"<td>{self.cell_val_num}</td>") 157 output.write(f"<td>{self.ordered}</td>") 158 output.write("</tr>") 159 160 return output.getvalue() 161 [end of tiledb/enumeration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tiledb/enumeration.py b/tiledb/enumeration.py --- a/tiledb/enumeration.py +++ b/tiledb/enumeration.py @@ -33,6 +33,8 @@ :type ordered: bool :param values: A Numpy array of values for this enumeration :type values: np.array + :param dtype: The Numpy data type for this enumeration + :type dtype: np.dtype :param ctx: A TileDB context :type ctx: tiledb.Ctx """ @@ -131,7 +133,7 @@ if self._ctx is None: return object.__repr__(self) - return f"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})" + return f"Enumeration(name='{self.name}', dtype={self.dtype}, dtype_name='{self.dtype.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})" def _repr_html_(self): output = io.StringIO()
{"golden_diff": "diff --git a/tiledb/enumeration.py b/tiledb/enumeration.py\n--- a/tiledb/enumeration.py\n+++ b/tiledb/enumeration.py\n@@ -33,6 +33,8 @@\n :type ordered: bool\n :param values: A Numpy array of values for this enumeration\n :type values: np.array\n+ :param dtype: The Numpy data type for this enumeration\n+ :type dtype: np.dtype\n :param ctx: A TileDB context\n :type ctx: tiledb.Ctx\n \"\"\"\n@@ -131,7 +133,7 @@\n if self._ctx is None:\n return object.__repr__(self)\n \n- return f\"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n+ return f\"Enumeration(name='{self.name}', dtype={self.dtype}, dtype_name='{self.dtype.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n \n def _repr_html_(self):\n output = io.StringIO()\n", "issue": "Show enumerated value-types in enum-printer\nRepro:\r\n\r\n```\r\n>>> A=tiledb.open('/var/p/obs')\r\n\r\n>>> A.enum('louvain')\r\nEnumeration(name='louvain', cell_val_num=4294967295, ordered=False, values=['CD4 T cells', 'CD14+ Monocytes', 'B cells', 'CD8 T cells', 'NK cells', 'FCGR3A+ Monocytes', 'Dendritic cells', 'Megakaryocytes'])\r\n\r\n>>> A.enum('louvain').dtype\r\ndtype('<U1')\r\n\r\n>>> A.enum('louvain').dtype.name\r\n'str32'\r\n```\r\n\r\nRequest: `A.enum('louvain')` should reveal the value dtype.\r\n\r\n---\r\nsc-43628\n", "before_files": [{"content": "from __future__ import annotations\n\nimport io\nfrom typing import Any, Optional, Sequence\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nimport tiledb.cc as lt\n\nfrom .ctx import Ctx, CtxMixin\nfrom .datatypes import DataType\n\n\nclass Enumeration(CtxMixin, lt.Enumeration):\n \"\"\"\n Represents a TileDB Enumeration.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n ordered: bool,\n values: Optional[Sequence[Any]] = None,\n dtype: Optional[np.dtype] = None,\n ctx: Optional[Ctx] = None,\n ):\n \"\"\"Class representing the TileDB Enumeration.\n\n :param name: The name of the to-be created Enumeration\n :type name: str\n :param ordered: Whether or not to consider this enumeration ordered\n :type ordered: bool\n :param values: A Numpy array of values for this enumeration\n :type values: np.array\n :param ctx: A TileDB context\n :type ctx: tiledb.Ctx\n \"\"\"\n if values is None or len(values) == 0:\n if dtype is None:\n raise ValueError(\"dtype must be provied for empty enumeration\")\n super().__init__(ctx, name, np.dtype(dtype), ordered)\n\n values = np.array(values)\n if np.dtype(values.dtype).kind in \"US\":\n dtype = (\n lt.DataType.STRING_UTF8\n if values.dtype.kind == \"U\"\n else lt.DataType.STRING_ASCII\n )\n super().__init__(ctx, name, values, ordered, dtype)\n else:\n super().__init__(ctx, name, ordered, values, np.array([]))\n\n @property\n def name(self) -> str:\n \"\"\"The enumeration label string.\n\n :rtype: str\n \"\"\"\n return super().name\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Numpy dtype representation of the enumeration type.\n\n :rtype: numpy.dtype\n \"\"\"\n return DataType.from_tiledb(super().type).np_dtype\n\n @property\n def cell_val_num(self) -> int:\n \"\"\"The enumeration's cell value number.\n\n :rtype: int\n \"\"\"\n return super().cell_val_num\n\n @property\n def ordered(self) -> bool:\n \"\"\"True if the enumeration is ordered.\n\n :rtype: bool\n \"\"\"\n return super().ordered\n\n def values(self) -> NDArray:\n \"\"\"The values of the enumeration.\n\n :rtype: NDArray\n \"\"\"\n if self.dtype.kind == \"U\":\n 
return np.array(super().str_values(), dtype=np.str_)\n elif self.dtype.kind == \"S\":\n return np.array(super().str_values(), dtype=np.bytes_)\n else:\n return np.array(super().values(), dtype=self.dtype)\n\n def extend(self, values: Sequence[Any]) -> Enumeration:\n \"\"\"Add additional values to the enumeration.\n\n :rtype: Enumeration\n \"\"\"\n values = np.array(values)\n if self.dtype.kind in \"US\" and values.dtype.kind not in \"US\":\n raise lt.TileDBError(\"Passed in enumeration must be string type\")\n\n if np.issubdtype(self.dtype, np.integer) and not np.issubdtype(\n values.dtype, np.integer\n ):\n raise lt.TileDBError(\"Passed in enumeration must be integer type\")\n\n return Enumeration.from_pybind11(self._ctx, super().extend(values))\n\n def __eq__(self, other):\n if not isinstance(other, Enumeration):\n return False\n\n return all(\n [\n self.name == other.name,\n self.dtype == other.dtype,\n self.cell_val_num == other.cell_val_num,\n self.ordered == other.ordered,\n np.array_equal(self.values(), other.values()),\n ]\n )\n\n def __repr__(self):\n # use safe repr if pybind11 constructor failed\n if self._ctx is None:\n return object.__repr__(self)\n\n return f\"Enumeration(name='{self.name}', cell_val_num={self.cell_val_num}, ordered={self.ordered}, values={list(self.values())})\"\n\n def _repr_html_(self):\n output = io.StringIO()\n\n output.write(\"<table>\")\n output.write(\"<tr>\")\n output.write(\"<th>Name</th>\")\n output.write(\"<th>Data Type</th>\")\n output.write(\"<th>Ordered</th>\")\n output.write(\"</tr>\")\n output.write(f\"{self._repr_html_row_only_()}\")\n output.write(\"</table>\")\n\n return output.getvalue()\n\n def _repr_html_row_only_(self):\n output = io.StringIO()\n\n output.write(\"<tr>\")\n output.write(f\"<td>{self.name}</td>\")\n output.write(f\"<td>{self.dtype}</td>\")\n output.write(f\"<td>{self.cell_val_num}</td>\")\n output.write(f\"<td>{self.ordered}</td>\")\n output.write(\"</tr>\")\n\n return output.getvalue()\n", "path": "tiledb/enumeration.py"}]}
2,154
253
gh_patches_debug_15455
rasdani/github-patches
git_diff
kubeflow__pipelines-9088
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> v2 - support resource requests and limits * [x] https://github.com/kubeflow/pipelines/pull/7045 * [x] #7043 * [x] #7047 </issue> <code> [start of samples/core/resource_spec/resource_spec_v2.py] 1 # Copyright 2020-2021 The Kubeflow Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 from kfp import dsl 17 18 # In tests, we install a KFP package from the PR under test. Users should not 19 # normally need to specify `kfp_package_path` in their component definitions. 20 _KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH') 21 22 23 @dsl.component(kfp_package_path=_KFP_PACKAGE_PATH) 24 def training_op(n: int) -> int: 25 # quickly allocate a lot of memory to verify memory is enough 26 a = [i for i in range(n)] 27 return len(a) 28 29 30 @dsl.pipeline( 31 name='pipeline-with-resource-spec', 32 description='A pipeline with resource specification.') 33 def my_pipeline(n: int = 11234567): 34 # For units of these resource limits, 35 # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes 36 # 11234567 roughly needs 400Mi+ memory. 37 # 38 # Note, with v2 python components, there's a larger memory overhead caused 39 # by installing KFP SDK in the component, so we had to increase memory limit to 650M. 40 training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M') 41 42 # TODO(Bobgy): other resource specs like cpu requests, memory requests and 43 # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354. 44 # There are other resource spec you can set. 45 # For example, to use TPU, add the following: 46 # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3') 47 # .set_gpu_limit(1) 48 [end of samples/core/resource_spec/resource_spec_v2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py --- a/samples/core/resource_spec/resource_spec_v2.py +++ b/samples/core/resource_spec/resource_spec_v2.py @@ -38,6 +38,9 @@ # Note, with v2 python components, there's a larger memory overhead caused # by installing KFP SDK in the component, so we had to increase memory limit to 650M. training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M') + + # TODO(gkcalat): enable requests once SDK implements the feature + # training_task = training_task.set_cpu_request('1').set_memory_request('650M') # TODO(Bobgy): other resource specs like cpu requests, memory requests and # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.
{"golden_diff": "diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py\n--- a/samples/core/resource_spec/resource_spec_v2.py\n+++ b/samples/core/resource_spec/resource_spec_v2.py\n@@ -38,6 +38,9 @@\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n+ \n+ # TODO(gkcalat): enable requests once SDK implements the feature\n+ # training_task = training_task.set_cpu_request('1').set_memory_request('650M')\n \n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n", "issue": "v2 - support resource requests and limits\n* [x] https://github.com/kubeflow/pipelines/pull/7045\r\n* [x] #7043\r\n* [x] #7047\r\n\n", "before_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom kfp import dsl\n\n# In tests, we install a KFP package from the PR under test. Users should not\n# normally need to specify `kfp_package_path` in their component definitions.\n_KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')\n\n\[email protected](kfp_package_path=_KFP_PACKAGE_PATH)\ndef training_op(n: int) -> int:\n # quickly allocate a lot of memory to verify memory is enough\n a = [i for i in range(n)]\n return len(a)\n\n\[email protected](\n name='pipeline-with-resource-spec',\n description='A pipeline with resource specification.')\ndef my_pipeline(n: int = 11234567):\n # For units of these resource limits,\n # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes\n # 11234567 roughly needs 400Mi+ memory.\n #\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n\n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n # There are other resource spec you can set.\n # For example, to use TPU, add the following:\n # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')\n # .set_gpu_limit(1)\n", "path": "samples/core/resource_spec/resource_spec_v2.py"}]}
1,203
218
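The record above tracks resource request/limit support in KFP v2; its golden diff only adds a commented-out `set_cpu_request`/`set_memory_request` TODO because the SDK had not implemented requests yet, while limits already work. As a rough, hedged illustration of the limit-setting pattern the sample exercises (not an official sample; assumes a recent `kfp` v2 SDK where `dsl.component`, `dsl.pipeline`, and the chainable `set_cpu_limit`/`set_memory_limit` task methods exist):

```python
# Minimal sketch: per-task resource limits in a KFP v2 pipeline.
from kfp import dsl, compiler


@dsl.component
def training_op(n: int) -> int:
    # Allocate a list to exercise the memory limit, as the sample in the record does.
    return len([i for i in range(n)])


@dsl.pipeline(name="pipeline-with-resource-spec")
def my_pipeline(n: int = 11234567):
    task = training_op(n=n)
    # Limits are strings in Kubernetes resource units; the calls return the task,
    # so they can be chained.
    task.set_cpu_limit("1").set_memory_limit("650M")
    # set_cpu_request / set_memory_request are what the issue above tracks and
    # only work on SDK versions that implement the feature.


if __name__ == "__main__":
    compiler.Compiler().compile(my_pipeline, package_path="pipeline.yaml")
```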
gh_patches_debug_22823
rasdani/github-patches
git_diff
Kinto__kinto-1923
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fallback to jsonschema DraftV4 in case Draftv7 is not available Mitigate jsonschema alfa version mandatory. This is quite a blocker to be honest. I upgraded a bunch of packages related to Kinto and I guess we could mitigate the issue by fallbacking to Draftv4. Refs #1808 </issue> <code> [start of kinto/schema_validation.py] 1 import colander 2 from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate 3 from pyramid.settings import asbool 4 5 from kinto.core import utils 6 from kinto.core.errors import raise_invalid 7 from kinto.views import object_exists_or_404 8 9 10 class JSONSchemaMapping(colander.SchemaNode): 11 def schema_type(self, **kw): 12 return colander.Mapping(unknown="preserve") 13 14 def deserialize(self, cstruct=colander.null): 15 # Start by deserializing a simple mapping. 16 validated = super().deserialize(cstruct) 17 18 # In case it is optional in parent schema. 19 if not validated or validated in (colander.null, colander.drop): 20 return validated 21 try: 22 check_schema(validated) 23 except ValidationError as e: 24 self.raise_invalid(e.message) 25 return validated 26 27 28 def check_schema(data): 29 try: 30 Draft7Validator.check_schema(data) 31 except SchemaError as e: 32 message = e.path.pop() + e.message 33 raise ValidationError(message) 34 35 36 def validate_schema(data, schema, ignore_fields=[]): 37 required_fields = [f for f in schema.get("required", []) if f not in ignore_fields] 38 # jsonschema doesn't accept 'required': [] yet. 39 # See https://github.com/Julian/jsonschema/issues/337. 40 # In the meantime, strip out 'required' if no other fields are required. 41 if required_fields: 42 schema = {**schema, "required": required_fields} 43 else: 44 schema = {f: v for f, v in schema.items() if f != "required"} 45 46 data = {f: v for f, v in data.items() if f not in ignore_fields} 47 48 try: 49 validate(data, schema) 50 except ValidationError as e: 51 if e.path: 52 field = e.path[-1] 53 elif e.validator_value: 54 field = e.validator_value[-1] 55 else: 56 field = e.schema_path[-1] 57 e.field = field 58 raise e 59 # Raise an error here if a reference in the schema doesn't resolve. 60 # jsonschema doesn't provide schema validation checking upon creation yet, 61 # it must be validated against data. 62 # See https://github.com/Julian/jsonschema/issues/399 63 # For future support https://github.com/Julian/jsonschema/issues/346. 64 except RefResolutionError as e: 65 raise e 66 67 68 def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]): 69 """Lookup in the parent objects if a schema was defined for this resource. 70 71 If the schema validation feature is enabled, if a schema is/are defined, and if the 72 data does not validate it/them, then it raises a 400 exception. 73 """ 74 settings = request.registry.settings 75 schema_validation = "experimental_collection_schema_validation" 76 # If disabled from settings, do nothing. 77 if not asbool(settings.get(schema_validation)): 78 return 79 80 bucket_id = request.matchdict["bucket_id"] 81 bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id) 82 buckets = request.bound_data.setdefault("buckets", {}) 83 if bucket_uri not in buckets: 84 # Unknown yet, fetch from storage. 
85 bucket = object_exists_or_404( 86 request, collection_id="bucket", parent_id="", object_id=bucket_id 87 ) 88 buckets[bucket_uri] = bucket 89 90 # Let's see if the bucket defines a schema for this resource. 91 metadata_field = f"{resource_name}:schema" 92 bucket = buckets[bucket_uri] 93 if metadata_field not in bucket: 94 return 95 96 # Validate or fail with 400. 97 schema = bucket[metadata_field] 98 try: 99 validate_schema(data, schema, ignore_fields=ignore_fields) 100 except ValidationError as e: 101 raise_invalid(request, name=e.field, description=e.message) 102 except RefResolutionError as e: 103 raise_invalid(request, name="schema", description=str(e)) 104 [end of kinto/schema_validation.py] [start of setup.py] 1 import codecs 2 import os 3 from setuptools import setup, find_packages 4 5 # abspath here because setup.py may be __main__, in which case 6 # __file__ is not guaranteed to be absolute 7 here = os.path.abspath(os.path.dirname(__file__)) 8 9 10 def read_file(filename): 11 """Open a related file and return its content.""" 12 with codecs.open(os.path.join(here, filename), encoding="utf-8") as f: 13 content = f.read() 14 return content 15 16 17 README = read_file("README.rst") 18 CHANGELOG = read_file("CHANGELOG.rst") 19 CONTRIBUTORS = read_file("CONTRIBUTORS.rst") 20 21 REQUIREMENTS = [ 22 "bcrypt", 23 "colander >= 1.4.0", 24 "cornice", 25 "cornice_swagger >= 0.5.1", 26 "dockerflow", 27 "jsonschema >= 3.0.0a1", 28 "jsonpatch", 29 "logging-color-formatter >= 1.0.1", # Message interpolations. 30 "python-dateutil", 31 "pyramid >= 1.9.1, < 2.0", 32 "pyramid_multiauth >= 0.8", # User on policy selected event. 33 "transaction", 34 # pyramid_tm changed the location of their tween in 2.x and one of 35 # our tests fails on 2.0. 36 "pyramid_tm >= 2.1", 37 "requests", 38 "waitress", 39 "ujson >= 1.35", 40 ] 41 42 POSTGRESQL_REQUIRES = ["SQLAlchemy", "psycopg2 > 2.5", "zope.sqlalchemy"] 43 44 REDIS_REQUIRES = ["kinto_redis"] 45 46 MEMCACHED_REQUIRES = ["python-memcached"] 47 48 SETUP_REQUIRES = ["pytest-runner"] 49 50 TEST_REQUIREMENTS = ["bravado_core", "pytest", "WebTest"] 51 52 DEPENDENCY_LINKS = [] 53 54 MONITORING_REQUIRES = ["raven", "statsd", "newrelic", "werkzeug"] 55 56 ENTRY_POINTS = { 57 "paste.app_factory": ["main = kinto:main"], 58 "console_scripts": ["kinto = kinto.__main__:main"], 59 } 60 61 62 setup( 63 name="kinto", 64 version="11.3.0.dev0", 65 description="Kinto Web Service - Store, Sync, Share, and Self-Host.", 66 long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS), 67 license="Apache License (2.0)", 68 classifiers=[ 69 "Programming Language :: Python", 70 "Programming Language :: Python :: 3", 71 "Programming Language :: Python :: 3.6", 72 "Programming Language :: Python :: 3.7", 73 "Programming Language :: Python :: Implementation :: CPython", 74 "Topic :: Internet :: WWW/HTTP", 75 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", 76 "License :: OSI Approved :: Apache Software License", 77 ], 78 keywords="web sync json storage services", 79 author="Mozilla Services", 80 author_email="[email protected]", 81 url="https://github.com/Kinto/kinto", 82 packages=find_packages(), 83 package_data={"": ["*.rst", "*.py", "*.yaml"]}, 84 include_package_data=True, 85 zip_safe=False, 86 setup_requires=SETUP_REQUIRES, 87 tests_require=TEST_REQUIREMENTS, 88 install_requires=REQUIREMENTS, 89 extras_require={ 90 "redis": REDIS_REQUIRES, 91 "memcached": MEMCACHED_REQUIRES, 92 "postgresql": POSTGRESQL_REQUIRES, 93 "monitoring": MONITORING_REQUIRES, 94 }, 95 
test_suite="tests", 96 dependency_links=DEPENDENCY_LINKS, 97 entry_points=ENTRY_POINTS, 98 ) 99 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py --- a/kinto/schema_validation.py +++ b/kinto/schema_validation.py @@ -1,5 +1,11 @@ import colander -from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate +from jsonschema import ValidationError, SchemaError, RefResolutionError, validate + +try: # pragma: no cover + from jsonschema import Draft7Validator as DraftValidator +except ImportError: + from jsonschema import Draft4Validator as DraftValidator + from pyramid.settings import asbool from kinto.core import utils @@ -27,7 +33,7 @@ def check_schema(data): try: - Draft7Validator.check_schema(data) + DraftValidator.check_schema(data) except SchemaError as e: message = e.path.pop() + e.message raise ValidationError(message) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ "cornice", "cornice_swagger >= 0.5.1", "dockerflow", - "jsonschema >= 3.0.0a1", + "jsonschema", "jsonpatch", "logging-color-formatter >= 1.0.1", # Message interpolations. "python-dateutil",
{"golden_diff": "diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py\n--- a/kinto/schema_validation.py\n+++ b/kinto/schema_validation.py\n@@ -1,5 +1,11 @@\n import colander\n-from jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\n+from jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n+\n+try: # pragma: no cover\n+ from jsonschema import Draft7Validator as DraftValidator\n+except ImportError:\n+ from jsonschema import Draft4Validator as DraftValidator\n+\n from pyramid.settings import asbool\n \n from kinto.core import utils\n@@ -27,7 +33,7 @@\n \n def check_schema(data):\n try:\n- Draft7Validator.check_schema(data)\n+ DraftValidator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \"cornice\",\n \"cornice_swagger >= 0.5.1\",\n \"dockerflow\",\n- \"jsonschema >= 3.0.0a1\",\n+ \"jsonschema\",\n \"jsonpatch\",\n \"logging-color-formatter >= 1.0.1\", # Message interpolations.\n \"python-dateutil\",\n", "issue": "Fallback to jsonschema DraftV4 in case Draftv7 is not available\nMitigate jsonschema alfa version mandatory.\r\n\r\nThis is quite a blocker to be honest. I upgraded a bunch of packages related to Kinto and I guess we could mitigate the issue by fallbacking to Draftv4.\r\n\r\nRefs #1808 \n", "before_files": [{"content": "import colander\nfrom jsonschema import Draft7Validator, ValidationError, SchemaError, RefResolutionError, validate\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n Draft7Validator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except 
RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, collection_id=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = f\"{resource_name}:schema\"\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding=\"utf-8\") as f:\n content = f.read()\n return content\n\n\nREADME = read_file(\"README.rst\")\nCHANGELOG = read_file(\"CHANGELOG.rst\")\nCONTRIBUTORS = read_file(\"CONTRIBUTORS.rst\")\n\nREQUIREMENTS = [\n \"bcrypt\",\n \"colander >= 1.4.0\",\n \"cornice\",\n \"cornice_swagger >= 0.5.1\",\n \"dockerflow\",\n \"jsonschema >= 3.0.0a1\",\n \"jsonpatch\",\n \"logging-color-formatter >= 1.0.1\", # Message interpolations.\n \"python-dateutil\",\n \"pyramid >= 1.9.1, < 2.0\",\n \"pyramid_multiauth >= 0.8\", # User on policy selected event.\n \"transaction\",\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n \"pyramid_tm >= 2.1\",\n \"requests\",\n \"waitress\",\n \"ujson >= 1.35\",\n]\n\nPOSTGRESQL_REQUIRES = [\"SQLAlchemy\", \"psycopg2 > 2.5\", \"zope.sqlalchemy\"]\n\nREDIS_REQUIRES = [\"kinto_redis\"]\n\nMEMCACHED_REQUIRES = [\"python-memcached\"]\n\nSETUP_REQUIRES = [\"pytest-runner\"]\n\nTEST_REQUIREMENTS = [\"bravado_core\", \"pytest\", \"WebTest\"]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\"raven\", \"statsd\", \"newrelic\", \"werkzeug\"]\n\nENTRY_POINTS = {\n \"paste.app_factory\": [\"main = kinto:main\"],\n \"console_scripts\": [\"kinto = kinto.__main__:main\"],\n}\n\n\nsetup(\n name=\"kinto\",\n version=\"11.3.0.dev0\",\n description=\"Kinto Web Service - Store, Sync, Share, and Self-Host.\",\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license=\"Apache License (2.0)\",\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n 
\"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n keywords=\"web sync json storage services\",\n author=\"Mozilla Services\",\n author_email=\"[email protected]\",\n url=\"https://github.com/Kinto/kinto\",\n packages=find_packages(),\n package_data={\"\": [\"*.rst\", \"*.py\", \"*.yaml\"]},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n \"redis\": REDIS_REQUIRES,\n \"memcached\": MEMCACHED_REQUIRES,\n \"postgresql\": POSTGRESQL_REQUIRES,\n \"monitoring\": MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS,\n)\n", "path": "setup.py"}]}
2,679
307
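The golden diff in the record above removes the pre-release `jsonschema >= 3.0.0a1` pin by preferring the Draft 7 validator and falling back to Draft 4 when it is unavailable. A stripped-down sketch of that import-fallback pattern, assuming only that some version of `jsonschema` is installed:

```python
# Prefer the Draft 7 validator; fall back to Draft 4 on older jsonschema releases.
try:
    from jsonschema import Draft7Validator as DraftValidator
except ImportError:
    from jsonschema import Draft4Validator as DraftValidator

from jsonschema import SchemaError, ValidationError


def check_schema(schema):
    """Raise ValidationError if the schema itself is malformed."""
    try:
        DraftValidator.check_schema(schema)
    except SchemaError as e:
        raise ValidationError(e.message)


if __name__ == "__main__":
    check_schema({"type": "object", "required": ["id"]})  # valid schema, no error
    try:
        check_schema({"type": 42})  # invalid: "type" must be a string or array
    except ValidationError as e:
        print("rejected:", e.message)
```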
gh_patches_debug_33968
rasdani/github-patches
git_diff
pypa__pip-2281
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip 6.0.3 weird symbols shown with download progress bar with pip 6.0.3 on Windows with cmd.exe ``` py -mpip install --upgrade setuptools Collecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set uptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02 Downloading setuptools-8.3-py2.py3-none-any.whl (552kB) ←[K 100% |################################| 552kB 835kB/s ta 0:00:01 ←[?25hInstalling collected packages: setuptools Found existing installation: setuptools 7.0 Uninstalling setuptools-7.0: Successfully uninstalled setuptools-7.0 Successfully installed setuptools-8.3 ``` There's weird stuff with the progress bar, possibly control characers that cmd.exe terminal can't handle </issue> <code> [start of pip/utils/ui.py] 1 from __future__ import absolute_import 2 from __future__ import division 3 4 import itertools 5 import sys 6 7 from pip.utils import format_size 8 from pip.utils.logging import get_indentation 9 from pip._vendor.progress.bar import Bar 10 from pip._vendor.progress.helpers import WritelnMixin 11 from pip._vendor.progress.spinner import Spinner 12 13 14 class DownloadProgressMixin(object): 15 16 def __init__(self, *args, **kwargs): 17 super(DownloadProgressMixin, self).__init__(*args, **kwargs) 18 self.message = (" " * (get_indentation() + 2)) + self.message 19 20 @property 21 def downloaded(self): 22 return format_size(self.index) 23 24 @property 25 def download_speed(self): 26 # Avoid zero division errors... 27 if self.avg == 0.0: 28 return "..." 29 return format_size(1 / self.avg) + "/s" 30 31 @property 32 def pretty_eta(self): 33 if self.eta: 34 return "eta %s" % self.eta_td 35 return "" 36 37 def iter(self, it, n=1): 38 for x in it: 39 yield x 40 self.next(n) 41 self.finish() 42 43 44 class DownloadProgressBar(DownloadProgressMixin, Bar): 45 46 file = sys.stdout 47 message = "%(percent)d%%" 48 suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" 49 50 51 class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner): 52 53 file = sys.stdout 54 suffix = "%(downloaded)s %(download_speed)s" 55 56 def next_phase(self): 57 if not hasattr(self, "_phaser"): 58 self._phaser = itertools.cycle(self.phases) 59 return next(self._phaser) 60 61 def update(self): 62 message = self.message % self 63 phase = self.next_phase() 64 suffix = self.suffix % self 65 line = ''.join([ 66 message, 67 " " if message else "", 68 phase, 69 " " if suffix else "", 70 suffix, 71 ]) 72 73 self.writeln(line) 74 [end of pip/utils/ui.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pip/utils/ui.py b/pip/utils/ui.py --- a/pip/utils/ui.py +++ b/pip/utils/ui.py @@ -4,12 +4,20 @@ import itertools import sys +from pip.compat import WINDOWS from pip.utils import format_size from pip.utils.logging import get_indentation from pip._vendor.progress.bar import Bar from pip._vendor.progress.helpers import WritelnMixin from pip._vendor.progress.spinner import Spinner +try: + from pip._vendor import colorama +# Lots of different errors can come from this, including SystemError and +# ImportError. +except Exception: + colorama = None + class DownloadProgressMixin(object): @@ -41,14 +49,40 @@ self.finish() -class DownloadProgressBar(DownloadProgressMixin, Bar): +class WindowsMixin(object): + + def __init__(self, *args, **kwargs): + super(WindowsMixin, self).__init__(*args, **kwargs) + + # Check if we are running on Windows and we have the colorama module, + # if we do then wrap our file with it. + if WINDOWS and colorama: + self.file = colorama.AnsiToWin32(self.file) + # The progress code expects to be able to call self.file.isatty() + # but the colorama.AnsiToWin32() object doesn't have that, so we'll + # add it. + self.file.isatty = lambda: self.file.wrapped.isatty() + # The progress code expects to be able to call self.file.flush() + # but the colorama.AnsiToWin32() object doesn't have that, so we'll + # add it. + self.file.flush = lambda: self.file.wrapped.flush() + + # The Windows terminal does not support the hide/show cursor ANSI codes + # even with colorama. So we'll ensure that hide_cursor is False on + # Windows. + if WINDOWS and self.hide_cursor: + self.hide_cursor = False + + +class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar): file = sys.stdout message = "%(percent)d%%" suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" -class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner): +class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin, + WritelnMixin, Spinner): file = sys.stdout suffix = "%(downloaded)s %(download_speed)s"
{"golden_diff": "diff --git a/pip/utils/ui.py b/pip/utils/ui.py\n--- a/pip/utils/ui.py\n+++ b/pip/utils/ui.py\n@@ -4,12 +4,20 @@\n import itertools\n import sys\n \n+from pip.compat import WINDOWS\n from pip.utils import format_size\n from pip.utils.logging import get_indentation\n from pip._vendor.progress.bar import Bar\n from pip._vendor.progress.helpers import WritelnMixin\n from pip._vendor.progress.spinner import Spinner\n \n+try:\n+ from pip._vendor import colorama\n+# Lots of different errors can come from this, including SystemError and\n+# ImportError.\n+except Exception:\n+ colorama = None\n+\n \n class DownloadProgressMixin(object):\n \n@@ -41,14 +49,40 @@\n self.finish()\n \n \n-class DownloadProgressBar(DownloadProgressMixin, Bar):\n+class WindowsMixin(object):\n+\n+ def __init__(self, *args, **kwargs):\n+ super(WindowsMixin, self).__init__(*args, **kwargs)\n+\n+ # Check if we are running on Windows and we have the colorama module,\n+ # if we do then wrap our file with it.\n+ if WINDOWS and colorama:\n+ self.file = colorama.AnsiToWin32(self.file)\n+ # The progress code expects to be able to call self.file.isatty()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.isatty = lambda: self.file.wrapped.isatty()\n+ # The progress code expects to be able to call self.file.flush()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.flush = lambda: self.file.wrapped.flush()\n+\n+ # The Windows terminal does not support the hide/show cursor ANSI codes\n+ # even with colorama. So we'll ensure that hide_cursor is False on\n+ # Windows.\n+ if WINDOWS and self.hide_cursor:\n+ self.hide_cursor = False\n+\n+\n+class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):\n \n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n \n \n-class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n+class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n+ WritelnMixin, Spinner):\n \n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n", "issue": "pip 6.0.3 weird symbols shown with download progress bar\nwith pip 6.0.3 on Windows with cmd.exe\n\n```\npy -mpip install --upgrade setuptools\n\nCollecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set\nuptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02\n Downloading setuptools-8.3-py2.py3-none-any.whl (552kB)\n\u2190[K 100% |################################| 552kB 835kB/s ta 0:00:01\n\u2190[?25hInstalling collected packages: setuptools\n Found existing installation: setuptools 7.0\n Uninstalling setuptools-7.0:\n Successfully uninstalled setuptools-7.0\n\nSuccessfully installed setuptools-8.3\n```\n\nThere's weird stuff with the progress bar, possibly control characers that cmd.exe terminal can't handle\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\n\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor.progress.bar import Bar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def 
downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass DownloadProgressBar(DownloadProgressMixin, Bar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}]}
1,349
565
gh_patches_debug_24947
rasdani/github-patches
git_diff
boto__boto-2111
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CloudFormation ValidateTemplate response is not parsed correctly. I'm running Python 2.7.5 and boto 2.24.0, and I noticed an issue with the result returned by CloudFormationConnection.validate_template(). It appears the stack capabilities is not properly parsed. Here's a code snippet that tickles the issue - the CloudFormation template (iam-user.json) is pasted below. ``` Python import boto import logging boto_logger = logging.getLogger('boto') boto_logger.setLevel(logging.DEBUG) tpl_file = open('iam-user.json') cfn_template_body = tpl_file.read() tpl_file.close() cfn_conn = boto.connect_cloudformation() validation_response = cfn_conn.validate_template(template_body=cfn_template_body) ``` When inspecting Capabilities in the response, I see a string with a newline and spaces, rather than a list that includes something like CAPABILITY_IAM: ``` Python In [11]: validation_response.Capabilities Out[11]: u'\n ' ``` Here's the full output of the code example: ``` Python In [2]: import boto In [3]: import logging In [4]: boto_logger = logging.getLogger('boto') In [5]: boto_logger.setLevel(logging.DEBUG) In [6]: tpl_file = open('iam-user.json') In [7]: cfn_template_body = tpl_file.read() In [8]: tpl_file.close() In [9]: cfn_conn = boto.connect_cloudformation() DEBUG:boto:Using access key found in environment variable. DEBUG:boto:Using secret key found in config file. In [10]: validation_response = cfn_conn.validate_template(template_body=cfn_template_body) DEBUG:boto:Method: POST DEBUG:boto:Path: / DEBUG:boto:Data: DEBUG:boto:Headers: {} DEBUG:boto:Host: cloudformation.us-east-1.amazonaws.com DEBUG:boto:Port: 443 DEBUG:boto:Params: {'Action': 'ValidateTemplate', 'Version': '2010-05-15', 'TemplateBody': '{\n "AWSTemplateFormatVersion" : "2010-09-09",\n "Description" : "Test",\n\n "Resources" : {\n\n "IamUser" : {\n "Type" : "AWS::IAM::User",\n "Properties" : {\n "Path" : "/",\n "Policies" : [{\n "PolicyName" : "CfnInit",\n "PolicyDocument": {\n "Statement":[{\n "Effect" : "Allow",\n "Action" : [ "cloudformation:DescribeStackResource" ],\n "Resource" : "*"\n }]\n }\n }]\n }\n },\n\n "CfnKeys" : {\n "Type" : "AWS::IAM::AccessKey",\n "Properties" : {\n "UserName" : { "Ref" : "IamUser" }\n }\n }\n },\n\n "Outputs" : {\n "User" : {\n "Description" : "IAM User",\n "Value" : { "Ref" : "IamUser" }\n },\n "AccessKeyId" : {\n "Description" : "AccessKeyId",\n "Value" : { "Ref" : "CfnKeys" }\n }\n }\n}\n'} DEBUG:boto:establishing HTTPS connection: host=cloudformation.us-east-1.amazonaws.com, kwargs={'port': 443, 'timeout': 70} DEBUG:boto:Token: None DEBUG:boto:CanonicalRequest: POST / host:cloudformation.us-east-1.amazonaws.com x-amz-date:20140204T211547Z host;x-amz-date ed2b6b57e57a95cefe1ed9a0a8a1d6cbb92060ccda448706ce9a7577107045bc DEBUG:boto:StringToSign: AWS4-HMAC-SHA256 20140204T211547Z 20140204/us-east-1/cloudformation/aws4_request f2704f5688594252d7cd4f721c7a7e03d51d41a0c3f33e82a1f58bfba97fcafa DEBUG:boto:Signature: ac40d1a913cf3ca67cc2b80bc05e7c1f364eb02e83bb3c2c96ea18241f183564 DEBUG:boto:wrapping ssl socket; CA certificate file=/Users/vrivellino/.devninja.env/lib/python2.7/site-packages/boto/cacerts/cacerts.txt DEBUG:boto:validating server certificate: hostname=cloudformation.us-east-1.amazonaws.com, certificate hosts=['cloudformation.us-east-1.amazonaws.com'] DEBUG:boto:<ValidateTemplateResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/"> <ValidateTemplateResult> 
<Description>Test</Description> <CapabilitiesReason>The following resource(s) require capabilities: [AWS::IAM::User, AWS::IAM::AccessKey]</CapabilitiesReason> <Capabilities> <member>CAPABILITY_IAM</member> </Capabilities> <Parameters/> </ValidateTemplateResult> <ResponseMetadata> <RequestId>8465355d-8de1-11e3-ab21-addb98855f3a</RequestId> </ResponseMetadata> </ValidateTemplateResponse> In [11]: validation_response.Capabilities Out[11]: u'\n ' ``` Here's the CloudFormation template I used: ``` JSON { "AWSTemplateFormatVersion" : "2010-09-09", "Description" : "Test", "Resources" : { "IamUser" : { "Type" : "AWS::IAM::User", "Properties" : { "Path" : "/", "Policies" : [{ "PolicyName" : "CfnInit", "PolicyDocument": { "Statement":[{ "Effect" : "Allow", "Action" : [ "cloudformation:DescribeStackResource" ], "Resource" : "*" }] } }] } }, "CfnKeys" : { "Type" : "AWS::IAM::AccessKey", "Properties" : { "UserName" : { "Ref" : "IamUser" } } } }, "Outputs" : { "User" : { "Description" : "IAM User", "Value" : { "Ref" : "IamUser" } }, "AccessKeyId" : { "Description" : "AccessKeyId", "Value" : { "Ref" : "CfnKeys" } } } } ``` Just for reference, this is the output when using the AWS cli tools to validate the template: ``` $ aws cloudformation validate-template --template-body file://./iam-user.json { "CapabilitiesReason": "The following resource(s) require capabilities: [AWS::IAM::User, AWS::IAM::AccessKey]", "Description": "Test", "Parameters": [], "Capabilities": [ "CAPABILITY_IAM" ] } ``` </issue> <code> [start of boto/cloudformation/template.py] 1 from boto.resultset import ResultSet 2 3 class Template(object): 4 def __init__(self, connection=None): 5 self.connection = connection 6 self.description = None 7 self.template_parameters = None 8 9 def startElement(self, name, attrs, connection): 10 if name == "Parameters": 11 self.template_parameters = ResultSet([('member', TemplateParameter)]) 12 return self.template_parameters 13 else: 14 return None 15 16 def endElement(self, name, value, connection): 17 if name == "Description": 18 self.description = value 19 else: 20 setattr(self, name, value) 21 22 class TemplateParameter(object): 23 def __init__(self, parent): 24 self.parent = parent 25 self.default_value = None 26 self.description = None 27 self.no_echo = None 28 self.parameter_key = None 29 30 def startElement(self, name, attrs, connection): 31 return None 32 33 def endElement(self, name, value, connection): 34 if name == "DefaultValue": 35 self.default_value = value 36 elif name == "Description": 37 self.description = value 38 elif name == "NoEcho": 39 self.no_echo = bool(value) 40 elif name == "ParameterKey": 41 self.parameter_key = value 42 else: 43 setattr(self, name, value) 44 [end of boto/cloudformation/template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/boto/cloudformation/template.py b/boto/cloudformation/template.py --- a/boto/cloudformation/template.py +++ b/boto/cloudformation/template.py @@ -1,21 +1,29 @@ from boto.resultset import ResultSet +from boto.cloudformation.stack import Capability class Template(object): def __init__(self, connection=None): self.connection = connection self.description = None self.template_parameters = None + self.capabilities_reason = None + self.capabilities = None def startElement(self, name, attrs, connection): if name == "Parameters": self.template_parameters = ResultSet([('member', TemplateParameter)]) return self.template_parameters + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities else: return None def endElement(self, name, value, connection): if name == "Description": self.description = value + elif name == "CapabilitiesReason": + self.capabilities_reason = value else: setattr(self, name, value)
{"golden_diff": "diff --git a/boto/cloudformation/template.py b/boto/cloudformation/template.py\n--- a/boto/cloudformation/template.py\n+++ b/boto/cloudformation/template.py\n@@ -1,21 +1,29 @@\n from boto.resultset import ResultSet\n+from boto.cloudformation.stack import Capability\n \n class Template(object):\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.template_parameters = None\n+ self.capabilities_reason = None\n+ self.capabilities = None\n \n def startElement(self, name, attrs, connection):\n if name == \"Parameters\":\n self.template_parameters = ResultSet([('member', TemplateParameter)])\n return self.template_parameters\n+ elif name == \"Capabilities\":\n+ self.capabilities = ResultSet([('member', Capability)])\n+ return self.capabilities\n else:\n return None\n \n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n+ elif name == \"CapabilitiesReason\":\n+ self.capabilities_reason = value\n else:\n setattr(self, name, value)\n", "issue": "CloudFormation ValidateTemplate response is not parsed correctly.\nI'm running Python 2.7.5 and boto 2.24.0, and I noticed an issue with the result returned by CloudFormationConnection.validate_template(). It appears the stack capabilities is not properly parsed.\n\nHere's a code snippet that tickles the issue - the CloudFormation template (iam-user.json) is pasted below.\n\n``` Python\nimport boto\nimport logging\nboto_logger = logging.getLogger('boto')\nboto_logger.setLevel(logging.DEBUG)\ntpl_file = open('iam-user.json')\ncfn_template_body = tpl_file.read()\ntpl_file.close()\ncfn_conn = boto.connect_cloudformation()\nvalidation_response = cfn_conn.validate_template(template_body=cfn_template_body)\n```\n\nWhen inspecting Capabilities in the response, I see a string with a newline and spaces, rather than a list that includes something like CAPABILITY_IAM:\n\n``` Python\nIn [11]: validation_response.Capabilities\nOut[11]: u'\\n '\n```\n\nHere's the full output of the code example:\n\n``` Python\nIn [2]: import boto\n\nIn [3]: import logging\n\nIn [4]: boto_logger = logging.getLogger('boto')\n\nIn [5]: boto_logger.setLevel(logging.DEBUG)\n\nIn [6]: tpl_file = open('iam-user.json')\n\nIn [7]: cfn_template_body = tpl_file.read()\n\nIn [8]: tpl_file.close()\n\nIn [9]: cfn_conn = boto.connect_cloudformation()\nDEBUG:boto:Using access key found in environment variable.\nDEBUG:boto:Using secret key found in config file.\n\nIn [10]: validation_response = cfn_conn.validate_template(template_body=cfn_template_body)\nDEBUG:boto:Method: POST\nDEBUG:boto:Path: /\nDEBUG:boto:Data: \nDEBUG:boto:Headers: {}\nDEBUG:boto:Host: cloudformation.us-east-1.amazonaws.com\nDEBUG:boto:Port: 443\nDEBUG:boto:Params: {'Action': 'ValidateTemplate', 'Version': '2010-05-15', 'TemplateBody': '{\\n \"AWSTemplateFormatVersion\" : \"2010-09-09\",\\n \"Description\" : \"Test\",\\n\\n \"Resources\" : {\\n\\n \"IamUser\" : {\\n \"Type\" : \"AWS::IAM::User\",\\n \"Properties\" : {\\n \"Path\" : \"/\",\\n \"Policies\" : [{\\n \"PolicyName\" : \"CfnInit\",\\n \"PolicyDocument\": {\\n \"Statement\":[{\\n \"Effect\" : \"Allow\",\\n \"Action\" : [ \"cloudformation:DescribeStackResource\" ],\\n \"Resource\" : \"*\"\\n }]\\n }\\n }]\\n }\\n },\\n\\n \"CfnKeys\" : {\\n \"Type\" : \"AWS::IAM::AccessKey\",\\n \"Properties\" : {\\n \"UserName\" : { \"Ref\" : \"IamUser\" }\\n }\\n }\\n },\\n\\n \"Outputs\" : {\\n \"User\" : {\\n \"Description\" : \"IAM User\",\\n \"Value\" : { \"Ref\" : 
\"IamUser\" }\\n },\\n \"AccessKeyId\" : {\\n \"Description\" : \"AccessKeyId\",\\n \"Value\" : { \"Ref\" : \"CfnKeys\" }\\n }\\n }\\n}\\n'}\nDEBUG:boto:establishing HTTPS connection: host=cloudformation.us-east-1.amazonaws.com, kwargs={'port': 443, 'timeout': 70}\nDEBUG:boto:Token: None\nDEBUG:boto:CanonicalRequest:\nPOST\n/\n\nhost:cloudformation.us-east-1.amazonaws.com\nx-amz-date:20140204T211547Z\n\nhost;x-amz-date\ned2b6b57e57a95cefe1ed9a0a8a1d6cbb92060ccda448706ce9a7577107045bc\nDEBUG:boto:StringToSign:\nAWS4-HMAC-SHA256\n20140204T211547Z\n20140204/us-east-1/cloudformation/aws4_request\nf2704f5688594252d7cd4f721c7a7e03d51d41a0c3f33e82a1f58bfba97fcafa\nDEBUG:boto:Signature:\nac40d1a913cf3ca67cc2b80bc05e7c1f364eb02e83bb3c2c96ea18241f183564\nDEBUG:boto:wrapping ssl socket; CA certificate file=/Users/vrivellino/.devninja.env/lib/python2.7/site-packages/boto/cacerts/cacerts.txt\nDEBUG:boto:validating server certificate: hostname=cloudformation.us-east-1.amazonaws.com, certificate hosts=['cloudformation.us-east-1.amazonaws.com']\nDEBUG:boto:<ValidateTemplateResponse xmlns=\"http://cloudformation.amazonaws.com/doc/2010-05-15/\">\n <ValidateTemplateResult>\n <Description>Test</Description>\n <CapabilitiesReason>The following resource(s) require capabilities: [AWS::IAM::User, AWS::IAM::AccessKey]</CapabilitiesReason>\n <Capabilities>\n <member>CAPABILITY_IAM</member>\n </Capabilities>\n <Parameters/>\n </ValidateTemplateResult>\n <ResponseMetadata>\n <RequestId>8465355d-8de1-11e3-ab21-addb98855f3a</RequestId>\n </ResponseMetadata>\n</ValidateTemplateResponse>\n\nIn [11]: validation_response.Capabilities\nOut[11]: u'\\n '\n```\n\nHere's the CloudFormation template I used:\n\n``` JSON\n{\n \"AWSTemplateFormatVersion\" : \"2010-09-09\",\n \"Description\" : \"Test\",\n\n \"Resources\" : {\n\n \"IamUser\" : {\n \"Type\" : \"AWS::IAM::User\",\n \"Properties\" : {\n \"Path\" : \"/\",\n \"Policies\" : [{\n \"PolicyName\" : \"CfnInit\",\n \"PolicyDocument\": {\n \"Statement\":[{\n \"Effect\" : \"Allow\",\n \"Action\" : [ \"cloudformation:DescribeStackResource\" ],\n \"Resource\" : \"*\"\n }]\n }\n }]\n }\n },\n\n \"CfnKeys\" : {\n \"Type\" : \"AWS::IAM::AccessKey\",\n \"Properties\" : {\n \"UserName\" : { \"Ref\" : \"IamUser\" }\n }\n }\n },\n\n \"Outputs\" : {\n \"User\" : {\n \"Description\" : \"IAM User\",\n \"Value\" : { \"Ref\" : \"IamUser\" }\n },\n \"AccessKeyId\" : {\n \"Description\" : \"AccessKeyId\",\n \"Value\" : { \"Ref\" : \"CfnKeys\" }\n }\n }\n}\n```\n\nJust for reference, this is the output when using the AWS cli tools to validate the template:\n\n```\n$ aws cloudformation validate-template --template-body file://./iam-user.json\n{\n \"CapabilitiesReason\": \"The following resource(s) require capabilities: [AWS::IAM::User, AWS::IAM::AccessKey]\",\n \"Description\": \"Test\",\n \"Parameters\": [],\n \"Capabilities\": [\n \"CAPABILITY_IAM\"\n ]\n}\n```\n\n", "before_files": [{"content": "from boto.resultset import ResultSet\n\nclass Template(object):\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.template_parameters = None\n\n def startElement(self, name, attrs, connection):\n if name == \"Parameters\":\n self.template_parameters = ResultSet([('member', TemplateParameter)])\n return self.template_parameters\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n else:\n setattr(self, name, value)\n\nclass TemplateParameter(object):\n def __init__(self, 
parent):\n self.parent = parent\n self.default_value = None\n self.description = None\n self.no_echo = None\n self.parameter_key = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"DefaultValue\":\n self.default_value = value\n elif name == \"Description\":\n self.description = value\n elif name == \"NoEcho\":\n self.no_echo = bool(value)\n elif name == \"ParameterKey\":\n self.parameter_key = value\n else:\n setattr(self, name, value)\n", "path": "boto/cloudformation/template.py"}]}
2,593
239
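The boto fix above teaches the SAX-style `Template` handler to treat `<Capabilities>` as a list of `<member>` elements instead of capturing the surrounding whitespace text. Outside of boto, the same response can be picked apart with the standard library; a short sketch using `xml.etree.ElementTree` on the payload quoted in the issue (namespace and element names taken directly from that response):

```python
import xml.etree.ElementTree as ET

RESPONSE = """<ValidateTemplateResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
  <ValidateTemplateResult>
    <Description>Test</Description>
    <CapabilitiesReason>The following resource(s) require capabilities: [AWS::IAM::User, AWS::IAM::AccessKey]</CapabilitiesReason>
    <Capabilities>
      <member>CAPABILITY_IAM</member>
    </Capabilities>
    <Parameters/>
  </ValidateTemplateResult>
</ValidateTemplateResponse>"""

NS = {"cfn": "http://cloudformation.amazonaws.com/doc/2010-05-15/"}

root = ET.fromstring(RESPONSE)
result = root.find("cfn:ValidateTemplateResult", NS)
capabilities = [m.text for m in result.findall("cfn:Capabilities/cfn:member", NS)]
reason = result.findtext("cfn:CapabilitiesReason", namespaces=NS)

print(capabilities)  # ['CAPABILITY_IAM'] -- a real list, not stray whitespace
print(reason)
```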
gh_patches_debug_13504
rasdani/github-patches
git_diff
ipython__ipython-3640
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> setp() issue in ipython notebook with figure references In IPython notebook, when I run `setp()` to change parameters of a figure created in another cell and accessed with its reference, an exception occurs ``` python --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-82-54a641ac28e1> in <module>() ----> 1 setp(axis.lines[0], linewidth=2) /home/amir/virtualenv/cr/local/lib/python2.7/site-packages/matplotlib/pyplot.pyc in setp(*args, **kwargs) 238 def setp(*args, **kwargs): 239 ret = _setp(*args, **kwargs) --> 240 draw_if_interactive() 241 return ret 242 /home/amir/virtualenv/cr/local/lib/python2.7/site-packages/IPython/zmq/pylab/backend_inline.py in draw_if_interactive() 123 # flush_figures() will act upon these values 124 --> 125 fig = Gcf.get_active().canvas.figure 126 127 # Hack: matplotlib FigureManager objects in interacive backends (at least AttributeError: 'NoneType' object has no attribute 'canvas' ``` Nevertheless, `setp()` successfully does its job but since IPython notebook by default closes the figures of previous cells the exception occurs. </issue> <code> [start of IPython/kernel/zmq/pylab/backend_inline.py] 1 """Produce SVG versions of active plots for display by the rich Qt frontend. 2 """ 3 #----------------------------------------------------------------------------- 4 # Imports 5 #----------------------------------------------------------------------------- 6 from __future__ import print_function 7 8 # Third-party imports 9 import matplotlib 10 from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg 11 from matplotlib._pylab_helpers import Gcf 12 13 # Local imports. 14 from IPython.config.configurable import SingletonConfigurable 15 from IPython.core.display import display 16 from IPython.core.displaypub import publish_display_data 17 from IPython.core.pylabtools import print_figure, select_figure_format 18 from IPython.utils.traitlets import Dict, Instance, CaselessStrEnum, Bool 19 from IPython.utils.warn import warn 20 21 #----------------------------------------------------------------------------- 22 # Configurable for inline backend options 23 #----------------------------------------------------------------------------- 24 # inherit from InlineBackendConfig for deprecation purposes 25 class InlineBackendConfig(SingletonConfigurable): 26 pass 27 28 class InlineBackend(InlineBackendConfig): 29 """An object to store configuration of the inline backend.""" 30 31 def _config_changed(self, name, old, new): 32 # warn on change of renamed config section 33 if new.InlineBackendConfig != old.InlineBackendConfig: 34 warn("InlineBackendConfig has been renamed to InlineBackend") 35 super(InlineBackend, self)._config_changed(name, old, new) 36 37 # The typical default figure size is too large for inline use, 38 # so we shrink the figure size to 6x4, and tweak fonts to 39 # make that fit. 40 rc = Dict({'figure.figsize': (6.0,4.0), 41 # play nicely with white background in the Qt and notebook frontend 42 'figure.facecolor': 'white', 43 'figure.edgecolor': 'white', 44 # 12pt labels get cutoff on 6x4 logplots, so use 10pt. 
45 'font.size': 10, 46 # 72 dpi matches SVG/qtconsole 47 # this only affects PNG export, as SVG has no dpi setting 48 'savefig.dpi': 72, 49 # 10pt still needs a little more room on the xlabel: 50 'figure.subplot.bottom' : .125 51 }, config=True, 52 help="""Subset of matplotlib rcParams that should be different for the 53 inline backend.""" 54 ) 55 56 figure_format = CaselessStrEnum(['svg', 'png', 'retina'], default_value='png', config=True, 57 help="The image format for figures with the inline backend.") 58 59 def _figure_format_changed(self, name, old, new): 60 if self.shell is None: 61 return 62 else: 63 select_figure_format(self.shell, new) 64 65 close_figures = Bool(True, config=True, 66 help="""Close all figures at the end of each cell. 67 68 When True, ensures that each cell starts with no active figures, but it 69 also means that one must keep track of references in order to edit or 70 redraw figures in subsequent cells. This mode is ideal for the notebook, 71 where residual plots from other cells might be surprising. 72 73 When False, one must call figure() to create new figures. This means 74 that gcf() and getfigs() can reference figures created in other cells, 75 and the active figure can continue to be edited with pylab/pyplot 76 methods that reference the current active figure. This mode facilitates 77 iterative editing of figures, and behaves most consistently with 78 other matplotlib backends, but figure barriers between cells must 79 be explicit. 80 """) 81 82 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC') 83 84 85 #----------------------------------------------------------------------------- 86 # Functions 87 #----------------------------------------------------------------------------- 88 89 def show(close=None): 90 """Show all figures as SVG/PNG payloads sent to the IPython clients. 91 92 Parameters 93 ---------- 94 close : bool, optional 95 If true, a ``plt.close('all')`` call is automatically issued after 96 sending all the figures. If this is set, the figures will entirely 97 removed from the internal list of figures. 98 """ 99 if close is None: 100 close = InlineBackend.instance().close_figures 101 try: 102 for figure_manager in Gcf.get_all_fig_managers(): 103 display(figure_manager.canvas.figure) 104 finally: 105 show._to_draw = [] 106 if close: 107 matplotlib.pyplot.close('all') 108 109 110 111 # This flag will be reset by draw_if_interactive when called 112 show._draw_called = False 113 # list of figures to draw when flush_figures is called 114 show._to_draw = [] 115 116 117 def draw_if_interactive(): 118 """ 119 Is called after every pylab drawing command 120 """ 121 # signal that the current active figure should be sent at the end of 122 # execution. Also sets the _draw_called flag, signaling that there will be 123 # something to send. At the end of the code execution, a separate call to 124 # flush_figures() will act upon these values 125 126 fig = Gcf.get_active().canvas.figure 127 128 # Hack: matplotlib FigureManager objects in interacive backends (at least 129 # in some of them) monkeypatch the figure object and add a .show() method 130 # to it. This applies the same monkeypatch in order to support user code 131 # that might expect `.show()` to be part of the official API of figure 132 # objects. 
133 # For further reference: 134 # https://github.com/ipython/ipython/issues/1612 135 # https://github.com/matplotlib/matplotlib/issues/835 136 137 if not hasattr(fig, 'show'): 138 # Queue up `fig` for display 139 fig.show = lambda *a: display(fig) 140 141 # If matplotlib was manually set to non-interactive mode, this function 142 # should be a no-op (otherwise we'll generate duplicate plots, since a user 143 # who set ioff() manually expects to make separate draw/show calls). 144 if not matplotlib.is_interactive(): 145 return 146 147 # ensure current figure will be drawn, and each subsequent call 148 # of draw_if_interactive() moves the active figure to ensure it is 149 # drawn last 150 try: 151 show._to_draw.remove(fig) 152 except ValueError: 153 # ensure it only appears in the draw list once 154 pass 155 # Queue up the figure for drawing in next show() call 156 show._to_draw.append(fig) 157 show._draw_called = True 158 159 160 def flush_figures(): 161 """Send all figures that changed 162 163 This is meant to be called automatically and will call show() if, during 164 prior code execution, there had been any calls to draw_if_interactive. 165 166 This function is meant to be used as a post_execute callback in IPython, 167 so user-caused errors are handled with showtraceback() instead of being 168 allowed to raise. If this function is not called from within IPython, 169 then these exceptions will raise. 170 """ 171 if not show._draw_called: 172 return 173 174 if InlineBackend.instance().close_figures: 175 # ignore the tracking, just draw and close all figures 176 try: 177 return show(True) 178 except Exception as e: 179 # safely show traceback if in IPython, else raise 180 try: 181 get_ipython 182 except NameError: 183 raise e 184 else: 185 get_ipython().showtraceback() 186 return 187 try: 188 # exclude any figures that were closed: 189 active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()]) 190 for fig in [ fig for fig in show._to_draw if fig in active ]: 191 try: 192 display(fig) 193 except Exception as e: 194 # safely show traceback if in IPython, else raise 195 try: 196 get_ipython 197 except NameError: 198 raise e 199 else: 200 get_ipython().showtraceback() 201 break 202 finally: 203 # clear flags for next round 204 show._to_draw = [] 205 show._draw_called = False 206 207 208 # Changes to matplotlib in version 1.2 requires a mpl backend to supply a default 209 # figurecanvas. This is set here to a Agg canvas 210 # See https://github.com/matplotlib/matplotlib/pull/1125 211 FigureCanvas = FigureCanvasAgg 212 213 [end of IPython/kernel/zmq/pylab/backend_inline.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/kernel/zmq/pylab/backend_inline.py b/IPython/kernel/zmq/pylab/backend_inline.py --- a/IPython/kernel/zmq/pylab/backend_inline.py +++ b/IPython/kernel/zmq/pylab/backend_inline.py @@ -122,8 +122,10 @@ # execution. Also sets the _draw_called flag, signaling that there will be # something to send. At the end of the code execution, a separate call to # flush_figures() will act upon these values - - fig = Gcf.get_active().canvas.figure + manager = Gcf.get_active() + if manager is None: + return + fig = manager.canvas.figure # Hack: matplotlib FigureManager objects in interacive backends (at least # in some of them) monkeypatch the figure object and add a .show() method
{"golden_diff": "diff --git a/IPython/kernel/zmq/pylab/backend_inline.py b/IPython/kernel/zmq/pylab/backend_inline.py\n--- a/IPython/kernel/zmq/pylab/backend_inline.py\n+++ b/IPython/kernel/zmq/pylab/backend_inline.py\n@@ -122,8 +122,10 @@\n # execution. Also sets the _draw_called flag, signaling that there will be\n # something to send. At the end of the code execution, a separate call to\n # flush_figures() will act upon these values\n-\n- fig = Gcf.get_active().canvas.figure\n+ manager = Gcf.get_active()\n+ if manager is None:\n+ return\n+ fig = manager.canvas.figure\n \n # Hack: matplotlib FigureManager objects in interacive backends (at least\n # in some of them) monkeypatch the figure object and add a .show() method\n", "issue": "setp() issue in ipython notebook with figure references\nIn IPython notebook, when I run `setp()` to change parameters of a figure created in another cell and accessed with its reference, an exception occurs\n\n``` python\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-82-54a641ac28e1> in <module>()\n----> 1 setp(axis.lines[0], linewidth=2)\n\n/home/amir/virtualenv/cr/local/lib/python2.7/site-packages/matplotlib/pyplot.pyc in setp(*args, **kwargs)\n 238 def setp(*args, **kwargs):\n 239 ret = _setp(*args, **kwargs)\n--> 240 draw_if_interactive()\n 241 return ret\n 242 \n\n/home/amir/virtualenv/cr/local/lib/python2.7/site-packages/IPython/zmq/pylab/backend_inline.py in draw_if_interactive()\n 123 # flush_figures() will act upon these values\n 124 \n--> 125 fig = Gcf.get_active().canvas.figure\n 126 \n 127 # Hack: matplotlib FigureManager objects in interacive backends (at least\n\nAttributeError: 'NoneType' object has no attribute 'canvas'\n```\n\nNevertheless, `setp()` successfully does its job but since IPython notebook by default closes the figures of previous cells the exception occurs.\n\n", "before_files": [{"content": "\"\"\"Produce SVG versions of active plots for display by the rich Qt frontend.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Third-party imports\nimport matplotlib\nfrom matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg\nfrom matplotlib._pylab_helpers import Gcf\n\n# Local imports.\nfrom IPython.config.configurable import SingletonConfigurable\nfrom IPython.core.display import display\nfrom IPython.core.displaypub import publish_display_data\nfrom IPython.core.pylabtools import print_figure, select_figure_format\nfrom IPython.utils.traitlets import Dict, Instance, CaselessStrEnum, Bool\nfrom IPython.utils.warn import warn\n\n#-----------------------------------------------------------------------------\n# Configurable for inline backend options\n#-----------------------------------------------------------------------------\n# inherit from InlineBackendConfig for deprecation purposes\nclass InlineBackendConfig(SingletonConfigurable):\n pass\n\nclass InlineBackend(InlineBackendConfig):\n \"\"\"An object to store configuration of the inline backend.\"\"\"\n\n def _config_changed(self, name, old, new):\n # warn on change of renamed config section\n if new.InlineBackendConfig != old.InlineBackendConfig:\n warn(\"InlineBackendConfig has been renamed to InlineBackend\")\n super(InlineBackend, self)._config_changed(name, old, new)\n\n # The typical 
default figure size is too large for inline use,\n # so we shrink the figure size to 6x4, and tweak fonts to\n # make that fit.\n rc = Dict({'figure.figsize': (6.0,4.0),\n # play nicely with white background in the Qt and notebook frontend\n 'figure.facecolor': 'white',\n 'figure.edgecolor': 'white',\n # 12pt labels get cutoff on 6x4 logplots, so use 10pt.\n 'font.size': 10,\n # 72 dpi matches SVG/qtconsole\n # this only affects PNG export, as SVG has no dpi setting\n 'savefig.dpi': 72,\n # 10pt still needs a little more room on the xlabel:\n 'figure.subplot.bottom' : .125\n }, config=True,\n help=\"\"\"Subset of matplotlib rcParams that should be different for the\n inline backend.\"\"\"\n )\n\n figure_format = CaselessStrEnum(['svg', 'png', 'retina'], default_value='png', config=True,\n help=\"The image format for figures with the inline backend.\")\n\n def _figure_format_changed(self, name, old, new):\n if self.shell is None:\n return\n else:\n select_figure_format(self.shell, new)\n \n close_figures = Bool(True, config=True,\n help=\"\"\"Close all figures at the end of each cell.\n \n When True, ensures that each cell starts with no active figures, but it\n also means that one must keep track of references in order to edit or\n redraw figures in subsequent cells. This mode is ideal for the notebook,\n where residual plots from other cells might be surprising.\n \n When False, one must call figure() to create new figures. This means\n that gcf() and getfigs() can reference figures created in other cells,\n and the active figure can continue to be edited with pylab/pyplot\n methods that reference the current active figure. This mode facilitates\n iterative editing of figures, and behaves most consistently with\n other matplotlib backends, but figure barriers between cells must\n be explicit.\n \"\"\")\n\n shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')\n\n\n#-----------------------------------------------------------------------------\n# Functions\n#-----------------------------------------------------------------------------\n\ndef show(close=None):\n \"\"\"Show all figures as SVG/PNG payloads sent to the IPython clients.\n\n Parameters\n ----------\n close : bool, optional\n If true, a ``plt.close('all')`` call is automatically issued after\n sending all the figures. If this is set, the figures will entirely\n removed from the internal list of figures.\n \"\"\"\n if close is None:\n close = InlineBackend.instance().close_figures\n try:\n for figure_manager in Gcf.get_all_fig_managers():\n display(figure_manager.canvas.figure)\n finally:\n show._to_draw = []\n if close:\n matplotlib.pyplot.close('all')\n\n\n\n# This flag will be reset by draw_if_interactive when called\nshow._draw_called = False\n# list of figures to draw when flush_figures is called\nshow._to_draw = []\n\n\ndef draw_if_interactive():\n \"\"\"\n Is called after every pylab drawing command\n \"\"\"\n # signal that the current active figure should be sent at the end of\n # execution. Also sets the _draw_called flag, signaling that there will be\n # something to send. At the end of the code execution, a separate call to\n # flush_figures() will act upon these values\n\n fig = Gcf.get_active().canvas.figure\n\n # Hack: matplotlib FigureManager objects in interacive backends (at least\n # in some of them) monkeypatch the figure object and add a .show() method\n # to it. 
This applies the same monkeypatch in order to support user code\n # that might expect `.show()` to be part of the official API of figure\n # objects.\n # For further reference:\n # https://github.com/ipython/ipython/issues/1612\n # https://github.com/matplotlib/matplotlib/issues/835\n \n if not hasattr(fig, 'show'):\n # Queue up `fig` for display\n fig.show = lambda *a: display(fig)\n\n # If matplotlib was manually set to non-interactive mode, this function\n # should be a no-op (otherwise we'll generate duplicate plots, since a user\n # who set ioff() manually expects to make separate draw/show calls).\n if not matplotlib.is_interactive():\n return\n\n # ensure current figure will be drawn, and each subsequent call\n # of draw_if_interactive() moves the active figure to ensure it is\n # drawn last\n try:\n show._to_draw.remove(fig)\n except ValueError:\n # ensure it only appears in the draw list once\n pass\n # Queue up the figure for drawing in next show() call\n show._to_draw.append(fig)\n show._draw_called = True\n\n\ndef flush_figures():\n \"\"\"Send all figures that changed\n\n This is meant to be called automatically and will call show() if, during\n prior code execution, there had been any calls to draw_if_interactive.\n \n This function is meant to be used as a post_execute callback in IPython,\n so user-caused errors are handled with showtraceback() instead of being\n allowed to raise. If this function is not called from within IPython,\n then these exceptions will raise.\n \"\"\"\n if not show._draw_called:\n return\n \n if InlineBackend.instance().close_figures:\n # ignore the tracking, just draw and close all figures\n try:\n return show(True)\n except Exception as e:\n # safely show traceback if in IPython, else raise\n try:\n get_ipython\n except NameError:\n raise e\n else:\n get_ipython().showtraceback()\n return\n try:\n # exclude any figures that were closed:\n active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])\n for fig in [ fig for fig in show._to_draw if fig in active ]:\n try:\n display(fig)\n except Exception as e:\n # safely show traceback if in IPython, else raise\n try:\n get_ipython\n except NameError:\n raise e\n else:\n get_ipython().showtraceback()\n break\n finally:\n # clear flags for next round\n show._to_draw = []\n show._draw_called = False\n\n\n# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default\n# figurecanvas. This is set here to a Agg canvas\n# See https://github.com/matplotlib/matplotlib/pull/1125\nFigureCanvas = FigureCanvasAgg\n\n", "path": "IPython/kernel/zmq/pylab/backend_inline.py"}]}
3,177
197
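The record above patches `draw_if_interactive()` to tolerate `Gcf.get_active()` returning `None` once the inline backend has closed every figure. The short sketch below reproduces that behaviour with plain matplotlib so the guard is easy to see; it is an illustration only, it uses the current `matplotlib._pylab_helpers` import path rather than the old IPython module, and nothing in it is taken from the dataset record itself.

```python
import matplotlib

matplotlib.use("Agg")  # headless backend so the example runs anywhere

import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf

plt.plot([1, 2, 3])
assert Gcf.get_active() is not None   # a figure exists, so there is an active manager

plt.close("all")                      # what close_figures=True does after each cell
manager = Gcf.get_active()
assert manager is None                # no active manager is left

# The patched pattern: bail out instead of dereferencing None.
if manager is None:
    fig = None                        # nothing to queue for drawing
else:
    fig = manager.canvas.figure
```

The same early-return when no manager is active is what the golden diff applies inside IPython's inline backend.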
gh_patches_debug_40987
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-715
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `fccenvironment_co_uk.py` no longer works Tests are failing for some of the `fccenvironment.co.uk` related collection schedules as raised by @mothy-tim [here](https://github.com/mampfes/hacs_waste_collection_schedule/issues/268#issuecomment-1447053595) - further investigation shows that these domains have incorrectly installed certificates. * https://www.sslshopper.com/ssl-checker.html#hostname=www.fccenvironment.co.uk ![Screenshot 2023-02-27 at 22 16 33](https://user-images.githubusercontent.com/6557665/221698385-7c526b46-1587-4be1-9d17-9427e3367add.png) * https://www.sslshopper.com/ssl-checker.html#hostname=westdevon.fccenvironment.co.uk ![Screenshot 2023-02-27 at 22 17 16](https://user-images.githubusercontent.com/6557665/221698514-d5a44378-14c0-4389-b3aa-d3c669673219.png) I'm happy to reach out to fccenvironment.co.uk to let them know they've improperly installed these certificates, however, I doubt we'll get back any kind of worthwhile response. I'll be opening a Pull Request to skip TLS Verification for these domains, not an ideal solution, but a decent enough workaround. Should fccenvironment.co.uk respond to my email and fix their cert chain I'll open another PR to revert these commits. </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py] 1 from urllib.parse import urlparse 2 3 import requests 4 from bs4 import BeautifulSoup 5 from dateutil import parser 6 from waste_collection_schedule import Collection 7 8 TITLE = "FCC Environment" 9 DESCRIPTION = """ 10 Consolidated source for waste collection services for ~60 local authorities. 11 Currently supports: 12 West Devon (Generic Provider) 13 South Hams (Generic Provider) 14 Market Harborough (Custom Provider) 15 """ 16 URL = "https://fccenvironment.co.uk" 17 EXTRA_INFO = [ 18 {"title": "Harborough District Council", "url": "https://harborough.gov.uk"}, 19 {"title": "South Hams District Council", "url": "https://southhams.gov.uk/"}, 20 {"title": "West Devon Borough Council", "url": "https://www.westdevon.gov.uk/"}, 21 ] 22 23 TEST_CASES = { 24 "14_LE16_9QX": {"uprn": "100030491624"}, # region omitted to test default values 25 "4_LE16_9QX": {"uprn": "100030491614", "region": "harborough"}, 26 "16_LE16_7NA": {"uprn": "100030493289", "region": "harborough"}, 27 "10_LE16_8ER": {"uprn": "200001136341", "region": "harborough"}, 28 "9_PL20_7SH": {"uprn": "10001326315", "region": "westdevon"}, 29 "3_PL20_7RY": {"uprn": "10001326041", "region": "westdevon"}, 30 "2_PL21_9BN": {"uprn": "100040279446", "region": "southhams"}, 31 "4_SL21_0HZ": {"uprn": "100040281987", "region": "southhams"}, 32 } 33 34 ICON_MAP = { 35 "Refuse": "mdi:trash-can", 36 "Recycling": "mdi:recycle", 37 "Garden": "mdi:leaf", 38 } 39 40 41 class Source: 42 def __init__(self, uprn: str, region: str = "harborough") -> None: 43 self.uprn = uprn 44 self.region = region 45 46 def getcollectiondetails(self, endpoint: str) -> list[Collection]: 47 domain = urlparse(endpoint).netloc 48 session = requests.Session() 49 cookies = session.get(f"https://{domain}/") 50 response = session.post( 51 endpoint, 52 headers={ 53 "x-requested-with": "XMLHttpRequest", 54 }, 55 data={ 56 "fcc_session_token": cookies.cookies["fcc_session_cookie"], 57 "uprn": self.uprn, 58 }, 59 ) 60 results = {} 61 for item in response.json()["binCollections"]["tile"]: 62 try: 63 soup = BeautifulSoup(item[0], "html.parser") 64 date = 
parser.parse( 65 soup.find_all("b")[2].text.split(",")[1].strip() 66 ).date() 67 service = soup.text.split("\n")[0] 68 except parser._parser.ParserError: 69 continue 70 71 """ 72 Handle duplication before creating the list of Collections 73 """ 74 for type in ICON_MAP: 75 if type in service: 76 if type in results.keys(): 77 if date < results[type]: 78 results[type] = date 79 else: 80 results[type] = date 81 82 entries = [] 83 for result in results: 84 entries.append( 85 Collection( 86 date=results[result], 87 t=result, 88 icon=ICON_MAP.get(result), 89 ) 90 ) 91 return entries 92 93 def harborough(self) -> list[Collection]: 94 _icons = { 95 "NON-RECYCLABLE WASTE BIN COLLECTION": "mdi:trash-can", 96 "RECYCLING COLLECTION": "mdi:recycle", 97 "GARDEN WASTE COLLECTION": "mdi:leaf", 98 } # Custom icons to avoid a breaking change 99 r = requests.post( 100 "https://www.fccenvironment.co.uk/harborough/detail-address", 101 data={"Uprn": self.uprn}, 102 ) 103 soup = BeautifulSoup(r.text, "html.parser") 104 services = soup.find( 105 "div", 106 attrs={"class": "blocks block-your-next-scheduled-bin-collection-days"}, 107 ).find_all("li") 108 entries = [] 109 for service in services: 110 for type in _icons: 111 if type.lower() in service.text.lower(): 112 try: 113 date = parser.parse( 114 service.find( 115 "span", attrs={"class": "pull-right"} 116 ).text.strip() 117 ).date() 118 except parser._parser.ParserError: 119 continue 120 121 entries.append( 122 Collection( 123 date=date, 124 t=type, 125 icon=_icons[type.upper()], 126 ) 127 ) 128 return entries 129 130 def fetch(self) -> list[Collection]: 131 if self.region == "harborough": 132 return self.harborough() 133 elif self.region == "westdevon": 134 return self.getcollectiondetails( 135 endpoint="https://westdevon.fccenvironment.co.uk/ajaxprocessor/getcollectiondetails" 136 ) 137 elif self.region == "southhams": 138 return self.getcollectiondetails( 139 endpoint="https://waste.southhams.gov.uk/mycollections/getcollectiondetails" 140 ) 141 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py @@ -1,10 +1,13 @@ from urllib.parse import urlparse import requests +import urllib3 from bs4 import BeautifulSoup from dateutil import parser from waste_collection_schedule import Collection +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + TITLE = "FCC Environment" DESCRIPTION = """ Consolidated source for waste collection services for ~60 local authorities. @@ -46,7 +49,7 @@ def getcollectiondetails(self, endpoint: str) -> list[Collection]: domain = urlparse(endpoint).netloc session = requests.Session() - cookies = session.get(f"https://{domain}/") + cookies = session.get(f"https://{domain}/", verify=False) response = session.post( endpoint, headers={ @@ -56,14 +59,13 @@ "fcc_session_token": cookies.cookies["fcc_session_cookie"], "uprn": self.uprn, }, + verify=False, ) results = {} for item in response.json()["binCollections"]["tile"]: try: soup = BeautifulSoup(item[0], "html.parser") - date = parser.parse( - soup.find_all("b")[2].text.split(",")[1].strip() - ).date() + date = parser.parse(soup.find_all("b")[2].text.split(",")[1].strip()).date() service = soup.text.split("\n")[0] except parser._parser.ParserError: continue @@ -99,6 +101,7 @@ r = requests.post( "https://www.fccenvironment.co.uk/harborough/detail-address", data={"Uprn": self.uprn}, + verify=False, ) soup = BeautifulSoup(r.text, "html.parser") services = soup.find( @@ -110,11 +113,7 @@ for type in _icons: if type.lower() in service.text.lower(): try: - date = parser.parse( - service.find( - "span", attrs={"class": "pull-right"} - ).text.strip() - ).date() + date = parser.parse(service.find("span", attrs={"class": "pull-right"}).text.strip()).date() except parser._parser.ParserError: continue
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py\n@@ -1,10 +1,13 @@\n from urllib.parse import urlparse\n \n import requests\n+import urllib3\n from bs4 import BeautifulSoup\n from dateutil import parser\n from waste_collection_schedule import Collection\n \n+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n+\n TITLE = \"FCC Environment\"\n DESCRIPTION = \"\"\"\n Consolidated source for waste collection services for ~60 local authorities.\n@@ -46,7 +49,7 @@\n def getcollectiondetails(self, endpoint: str) -> list[Collection]:\n domain = urlparse(endpoint).netloc\n session = requests.Session()\n- cookies = session.get(f\"https://{domain}/\")\n+ cookies = session.get(f\"https://{domain}/\", verify=False)\n response = session.post(\n endpoint,\n headers={\n@@ -56,14 +59,13 @@\n \"fcc_session_token\": cookies.cookies[\"fcc_session_cookie\"],\n \"uprn\": self.uprn,\n },\n+ verify=False,\n )\n results = {}\n for item in response.json()[\"binCollections\"][\"tile\"]:\n try:\n soup = BeautifulSoup(item[0], \"html.parser\")\n- date = parser.parse(\n- soup.find_all(\"b\")[2].text.split(\",\")[1].strip()\n- ).date()\n+ date = parser.parse(soup.find_all(\"b\")[2].text.split(\",\")[1].strip()).date()\n service = soup.text.split(\"\\n\")[0]\n except parser._parser.ParserError:\n continue\n@@ -99,6 +101,7 @@\n r = requests.post(\n \"https://www.fccenvironment.co.uk/harborough/detail-address\",\n data={\"Uprn\": self.uprn},\n+ verify=False,\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n services = soup.find(\n@@ -110,11 +113,7 @@\n for type in _icons:\n if type.lower() in service.text.lower():\n try:\n- date = parser.parse(\n- service.find(\n- \"span\", attrs={\"class\": \"pull-right\"}\n- ).text.strip()\n- ).date()\n+ date = parser.parse(service.find(\"span\", attrs={\"class\": \"pull-right\"}).text.strip()).date()\n except parser._parser.ParserError:\n continue\n", "issue": "`fccenvironment_co_uk.py` no longer works\nTests are failing for some of the `fccenvironment.co.uk` related collection schedules as raised by @mothy-tim [here](https://github.com/mampfes/hacs_waste_collection_schedule/issues/268#issuecomment-1447053595) - further investigation shows that these domains have incorrectly installed certificates.\r\n\r\n* https://www.sslshopper.com/ssl-checker.html#hostname=www.fccenvironment.co.uk\r\n![Screenshot 2023-02-27 at 22 16 33](https://user-images.githubusercontent.com/6557665/221698385-7c526b46-1587-4be1-9d17-9427e3367add.png)\r\n* https://www.sslshopper.com/ssl-checker.html#hostname=westdevon.fccenvironment.co.uk\r\n![Screenshot 2023-02-27 at 22 17 16](https://user-images.githubusercontent.com/6557665/221698514-d5a44378-14c0-4389-b3aa-d3c669673219.png)\r\n\r\nI'm happy to reach out to fccenvironment.co.uk to let them know they've improperly installed these certificates, however, I doubt we'll get back any kind of worthwhile response.\r\n\r\nI'll be opening a Pull Request to skip TLS Verification for these domains, not an ideal solution, but a decent enough workaround. 
Should fccenvironment.co.uk respond to my email and fix their cert chain I'll open another PR to revert these commits.\n", "before_files": [{"content": "from urllib.parse import urlparse\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser\nfrom waste_collection_schedule import Collection\n\nTITLE = \"FCC Environment\"\nDESCRIPTION = \"\"\"\n Consolidated source for waste collection services for ~60 local authorities.\n Currently supports:\n West Devon (Generic Provider)\n South Hams (Generic Provider)\n Market Harborough (Custom Provider)\n \"\"\"\nURL = \"https://fccenvironment.co.uk\"\nEXTRA_INFO = [\n {\"title\": \"Harborough District Council\", \"url\": \"https://harborough.gov.uk\"},\n {\"title\": \"South Hams District Council\", \"url\": \"https://southhams.gov.uk/\"},\n {\"title\": \"West Devon Borough Council\", \"url\": \"https://www.westdevon.gov.uk/\"},\n]\n\nTEST_CASES = {\n \"14_LE16_9QX\": {\"uprn\": \"100030491624\"}, # region omitted to test default values\n \"4_LE16_9QX\": {\"uprn\": \"100030491614\", \"region\": \"harborough\"},\n \"16_LE16_7NA\": {\"uprn\": \"100030493289\", \"region\": \"harborough\"},\n \"10_LE16_8ER\": {\"uprn\": \"200001136341\", \"region\": \"harborough\"},\n \"9_PL20_7SH\": {\"uprn\": \"10001326315\", \"region\": \"westdevon\"},\n \"3_PL20_7RY\": {\"uprn\": \"10001326041\", \"region\": \"westdevon\"},\n \"2_PL21_9BN\": {\"uprn\": \"100040279446\", \"region\": \"southhams\"},\n \"4_SL21_0HZ\": {\"uprn\": \"100040281987\", \"region\": \"southhams\"},\n}\n\nICON_MAP = {\n \"Refuse\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn: str, region: str = \"harborough\") -> None:\n self.uprn = uprn\n self.region = region\n\n def getcollectiondetails(self, endpoint: str) -> list[Collection]:\n domain = urlparse(endpoint).netloc\n session = requests.Session()\n cookies = session.get(f\"https://{domain}/\")\n response = session.post(\n endpoint,\n headers={\n \"x-requested-with\": \"XMLHttpRequest\",\n },\n data={\n \"fcc_session_token\": cookies.cookies[\"fcc_session_cookie\"],\n \"uprn\": self.uprn,\n },\n )\n results = {}\n for item in response.json()[\"binCollections\"][\"tile\"]:\n try:\n soup = BeautifulSoup(item[0], \"html.parser\")\n date = parser.parse(\n soup.find_all(\"b\")[2].text.split(\",\")[1].strip()\n ).date()\n service = soup.text.split(\"\\n\")[0]\n except parser._parser.ParserError:\n continue\n\n \"\"\"\n Handle duplication before creating the list of Collections\n \"\"\"\n for type in ICON_MAP:\n if type in service:\n if type in results.keys():\n if date < results[type]:\n results[type] = date\n else:\n results[type] = date\n\n entries = []\n for result in results:\n entries.append(\n Collection(\n date=results[result],\n t=result,\n icon=ICON_MAP.get(result),\n )\n )\n return entries\n\n def harborough(self) -> list[Collection]:\n _icons = {\n \"NON-RECYCLABLE WASTE BIN COLLECTION\": \"mdi:trash-can\",\n \"RECYCLING COLLECTION\": \"mdi:recycle\",\n \"GARDEN WASTE COLLECTION\": \"mdi:leaf\",\n } # Custom icons to avoid a breaking change\n r = requests.post(\n \"https://www.fccenvironment.co.uk/harborough/detail-address\",\n data={\"Uprn\": self.uprn},\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n services = soup.find(\n \"div\",\n attrs={\"class\": \"blocks block-your-next-scheduled-bin-collection-days\"},\n ).find_all(\"li\")\n entries = []\n for service in services:\n for type in _icons:\n if type.lower() in 
service.text.lower():\n try:\n date = parser.parse(\n service.find(\n \"span\", attrs={\"class\": \"pull-right\"}\n ).text.strip()\n ).date()\n except parser._parser.ParserError:\n continue\n\n entries.append(\n Collection(\n date=date,\n t=type,\n icon=_icons[type.upper()],\n )\n )\n return entries\n\n def fetch(self) -> list[Collection]:\n if self.region == \"harborough\":\n return self.harborough()\n elif self.region == \"westdevon\":\n return self.getcollectiondetails(\n endpoint=\"https://westdevon.fccenvironment.co.uk/ajaxprocessor/getcollectiondetails\"\n )\n elif self.region == \"southhams\":\n return self.getcollectiondetails(\n endpoint=\"https://waste.southhams.gov.uk/mycollections/getcollectiondetails\"\n )\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/fccenvironment_co_uk.py"}]}
2,450
582
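The workaround in the record above is to pass `verify=False` because the fccenvironment hosts serve an incomplete certificate chain. Before disabling verification it can help to confirm that the failure really is a chain problem; the sketch below does that with the standard library. The hostname is taken from the issue, the helper name is ours, and the host's configuration may have changed since the issue was filed, so treat this as a diagnostic sketch rather than part of the patch.

```python
import socket
import ssl

HOST = "www.fccenvironment.co.uk"  # host named in the issue; used only as an example


def verified_handshake_ok(host: str, port: int = 443) -> bool:
    """Return True if a default-verifying TLS handshake succeeds."""
    context = ssl.create_default_context()
    try:
        with socket.create_connection((host, port), timeout=10) as sock:
            with context.wrap_socket(sock, server_hostname=host):
                return True
    except ssl.SSLCertVerificationError as exc:
        # A missing intermediate typically surfaces as
        # "unable to get local issuer certificate".
        print(f"verification failed: {exc.verify_message}")
        return False


if __name__ == "__main__":
    if not verified_handshake_ok(HOST):
        # The leaf certificate can still be fetched without verification,
        # which is effectively what requests does once verify=False is set.
        pem = ssl.get_server_certificate((HOST, 443))
        print(pem.splitlines()[0])  # "-----BEGIN CERTIFICATE-----"
```

A verified handshake that fails with "unable to get local issuer certificate" while the unverified leaf fetch succeeds is typically the signature of a server that does not send its intermediate certificate, which matches the sslshopper screenshots in the issue.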
gh_patches_debug_39326
rasdani/github-patches
git_diff
scrapy__scrapy-4803
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CachingHostnameResolver does not work with reactor.resolve() ### Description Hi. Thank you for maintaining this awesome software :) I am working on a project using scrapy that implements a custom downloader class ([link](https://github.com/michael-lazar/mozz-archiver/blob/master/mozz_archiver/downloaders.py)). I want to resolve IPv6 addresses, and I found the section in the documentation about the ``DNS_RESOLVER`` setting that was added in #4227. I tried enabling the new ``DNS_RESOLVER = "scrapy.resolver.CachingHostnameResolver"`` and was immediately greeted with this exception. ``` Unhandled Error Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/scrapy/commands/crawl.py", line 27, in run self.crawler_process.start() File "/usr/local/lib/python3.8/site-packages/scrapy/crawler.py", line 327, in start reactor.run(installSignalHandlers=False) # blocking call File "/usr/local/lib/python3.8/site-packages/twisted/internet/base.py", line 1283, in run self.mainLoop() File "/usr/local/lib/python3.8/site-packages/twisted/internet/base.py", line 1292, in mainLoop self.runUntilCurrent() --- <exception caught here> --- File "/usr/local/lib/python3.8/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent call.func(*call.args, **call.kw) File "/usr/local/lib/python3.8/site-packages/twisted/internet/tcp.py", line 449, in resolveAddress d = self.reactor.resolve(self.addr[0]) File "/usr/local/lib/python3.8/site-packages/twisted/internet/base.py", line 638, in resolve return self.resolver.getHostByName(name, timeout) File "/usr/local/lib/python3.8/site-packages/twisted/internet/_resolver.py", line 277, in getHostByName self._nameResolver.resolveHostName(FirstOneWins(result), name, 0, File "/usr/local/lib/python3.8/site-packages/scrapy/resolver.py", line 80, in resolveHostName class CachingResolutionReceiver(resolutionReceiver): builtins.TypeError: __init__() takes 2 positional arguments but 4 were given ``` ### Steps to Reproduce This is also reproducible using the bundled FTP downloader 1. ``scrapy startproject scrapy_test`` 2. ``scrapy genspider example mozz.us`` 3. Add ``DNS_RESOLVER = "scrapy.resolver.CachingHostnameResolver"`` to the settings file 4. Change the spider start_url to ``ftp://mozz.us`` 5. ``scrapy crawl scrapy_test`` ### Versions ``` Scrapy : 2.3.0 lxml : 4.5.2.0 libxml2 : 2.9.10 cssselect : 1.1.0 parsel : 1.6.0 w3lib : 1.22.0 Twisted : 20.3.0 Python : 3.8.5 (default, Jul 21 2020, 10:48:26) - [Clang 11.0.3 (clang-1103.0.32.62)] pyOpenSSL : 19.1.0 (OpenSSL 1.1.1g 21 Apr 2020) cryptography : 3.0 Platform : macOS-10.15.6-x86_64-i386-64bit ``` ### Additional context This was a tricky one to debug because everything works as expected with the HTTP Agent downloader. This issue only appears when you implement a downloader that depends on calling ``reactor.resolve()`` directly without using ``twisted.internet.endpoints.HostnameEndpoint``. I discovered that in the twisted [IHostnameResolver](https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IHostnameResolver.html) interface, the ``resolutionReceiver`` method argument is expected to be an *instance* of a resolution receiver class, and not a *type* of a resolution receiver class. 
So I believe the scrapy code below is incorrect: https://github.com/scrapy/scrapy/blob/5e997587d9b13344a0afa9bb4cf781829a66ce23/scrapy/resolver.py#L76-L80 The subclass here only works with the Scrapy Agent because the ``HostnameEndpoint`` does this weird thing where it defines a class with only static methods, so it can pass the class itself instead of instantiating it. https://github.com/twisted/twisted/blob/22f949f7ce187513f0c218b73186c8a73baa00b4/src/twisted/internet/endpoints.py#L942-L958 ```python @provider(IResolutionReceiver) class EndpointReceiver: @staticmethod def resolutionBegan(resolutionInProgress): pass @staticmethod def addressResolved(address): addresses.append(address) @staticmethod def resolutionComplete(): d.callback(addresses) self._nameResolver.resolveHostName( EndpointReceiver, self._hostText, portNumber=self._port ) ``` However, there are other places in the twisted reactor where twisted does pass an object instance directly to this method. https://github.com/twisted/twisted/blob/7e3ce790ca9f004ab386f9ecbba8f505d66cd3bd/src/twisted/internet/_resolver.py#L307 ```python result = Deferred() self._nameResolver.resolveHostName(FirstOneWins(result), name, 0, [IPv4Address]) return result ``` </issue> <code> [start of scrapy/resolver.py] 1 from twisted.internet import defer 2 from twisted.internet.base import ThreadedResolver 3 from twisted.internet.interfaces import IHostnameResolver, IResolutionReceiver, IResolverSimple 4 from zope.interface.declarations import implementer, provider 5 6 from scrapy.utils.datatypes import LocalCache 7 8 9 # TODO: cache misses 10 dnscache = LocalCache(10000) 11 12 13 @implementer(IResolverSimple) 14 class CachingThreadedResolver(ThreadedResolver): 15 """ 16 Default caching resolver. IPv4 only, supports setting a timeout value for DNS requests. 17 """ 18 19 def __init__(self, reactor, cache_size, timeout): 20 super().__init__(reactor) 21 dnscache.limit = cache_size 22 self.timeout = timeout 23 24 @classmethod 25 def from_crawler(cls, crawler, reactor): 26 if crawler.settings.getbool('DNSCACHE_ENABLED'): 27 cache_size = crawler.settings.getint('DNSCACHE_SIZE') 28 else: 29 cache_size = 0 30 return cls(reactor, cache_size, crawler.settings.getfloat('DNS_TIMEOUT')) 31 32 def install_on_reactor(self): 33 self.reactor.installResolver(self) 34 35 def getHostByName(self, name, timeout=None): 36 if name in dnscache: 37 return defer.succeed(dnscache[name]) 38 # in Twisted<=16.6, getHostByName() is always called with 39 # a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple), 40 # so the input argument above is simply overridden 41 # to enforce Scrapy's DNS_TIMEOUT setting's value 42 timeout = (self.timeout,) 43 d = super().getHostByName(name, timeout) 44 if dnscache.limit: 45 d.addCallback(self._cache_result, name) 46 return d 47 48 def _cache_result(self, result, name): 49 dnscache[name] = result 50 return result 51 52 53 @implementer(IHostnameResolver) 54 class CachingHostnameResolver: 55 """ 56 Experimental caching resolver. Resolves IPv4 and IPv6 addresses, 57 does not support setting a timeout value for DNS requests. 
58 """ 59 60 def __init__(self, reactor, cache_size): 61 self.reactor = reactor 62 self.original_resolver = reactor.nameResolver 63 dnscache.limit = cache_size 64 65 @classmethod 66 def from_crawler(cls, crawler, reactor): 67 if crawler.settings.getbool('DNSCACHE_ENABLED'): 68 cache_size = crawler.settings.getint('DNSCACHE_SIZE') 69 else: 70 cache_size = 0 71 return cls(reactor, cache_size) 72 73 def install_on_reactor(self): 74 self.reactor.installNameResolver(self) 75 76 def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, 77 addressTypes=None, transportSemantics='TCP'): 78 79 @provider(IResolutionReceiver) 80 class CachingResolutionReceiver(resolutionReceiver): 81 82 def resolutionBegan(self, resolution): 83 super().resolutionBegan(resolution) 84 self.resolution = resolution 85 self.resolved = False 86 87 def addressResolved(self, address): 88 super().addressResolved(address) 89 self.resolved = True 90 91 def resolutionComplete(self): 92 super().resolutionComplete() 93 if self.resolved: 94 dnscache[hostName] = self.resolution 95 96 try: 97 return dnscache[hostName] 98 except KeyError: 99 return self.original_resolver.resolveHostName( 100 CachingResolutionReceiver(), 101 hostName, 102 portNumber, 103 addressTypes, 104 transportSemantics 105 ) 106 [end of scrapy/resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/resolver.py b/scrapy/resolver.py --- a/scrapy/resolver.py +++ b/scrapy/resolver.py @@ -1,6 +1,6 @@ from twisted.internet import defer from twisted.internet.base import ThreadedResolver -from twisted.internet.interfaces import IHostnameResolver, IResolutionReceiver, IResolverSimple +from twisted.internet.interfaces import IHostResolution, IHostnameResolver, IResolutionReceiver, IResolverSimple from zope.interface.declarations import implementer, provider from scrapy.utils.datatypes import LocalCache @@ -50,6 +50,36 @@ return result +@implementer(IHostResolution) +class HostResolution: + def __init__(self, name): + self.name = name + + def cancel(self): + raise NotImplementedError() + + +@provider(IResolutionReceiver) +class _CachingResolutionReceiver: + def __init__(self, resolutionReceiver, hostName): + self.resolutionReceiver = resolutionReceiver + self.hostName = hostName + self.addresses = [] + + def resolutionBegan(self, resolution): + self.resolutionReceiver.resolutionBegan(resolution) + self.resolution = resolution + + def addressResolved(self, address): + self.resolutionReceiver.addressResolved(address) + self.addresses.append(address) + + def resolutionComplete(self): + self.resolutionReceiver.resolutionComplete() + if self.addresses: + dnscache[self.hostName] = self.addresses + + @implementer(IHostnameResolver) class CachingHostnameResolver: """ @@ -73,33 +103,22 @@ def install_on_reactor(self): self.reactor.installNameResolver(self) - def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, - addressTypes=None, transportSemantics='TCP'): - - @provider(IResolutionReceiver) - class CachingResolutionReceiver(resolutionReceiver): - - def resolutionBegan(self, resolution): - super().resolutionBegan(resolution) - self.resolution = resolution - self.resolved = False - - def addressResolved(self, address): - super().addressResolved(address) - self.resolved = True - - def resolutionComplete(self): - super().resolutionComplete() - if self.resolved: - dnscache[hostName] = self.resolution - + def resolveHostName( + self, resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics="TCP" + ): try: - return dnscache[hostName] + addresses = dnscache[hostName] except KeyError: return self.original_resolver.resolveHostName( - CachingResolutionReceiver(), + _CachingResolutionReceiver(resolutionReceiver, hostName), hostName, portNumber, addressTypes, - transportSemantics + transportSemantics, ) + else: + resolutionReceiver.resolutionBegan(HostResolution(hostName)) + for addr in addresses: + resolutionReceiver.addressResolved(addr) + resolutionReceiver.resolutionComplete() + return resolutionReceiver
{"golden_diff": "diff --git a/scrapy/resolver.py b/scrapy/resolver.py\n--- a/scrapy/resolver.py\n+++ b/scrapy/resolver.py\n@@ -1,6 +1,6 @@\n from twisted.internet import defer\n from twisted.internet.base import ThreadedResolver\n-from twisted.internet.interfaces import IHostnameResolver, IResolutionReceiver, IResolverSimple\n+from twisted.internet.interfaces import IHostResolution, IHostnameResolver, IResolutionReceiver, IResolverSimple\n from zope.interface.declarations import implementer, provider\n \n from scrapy.utils.datatypes import LocalCache\n@@ -50,6 +50,36 @@\n return result\n \n \n+@implementer(IHostResolution)\n+class HostResolution:\n+ def __init__(self, name):\n+ self.name = name\n+\n+ def cancel(self):\n+ raise NotImplementedError()\n+\n+\n+@provider(IResolutionReceiver)\n+class _CachingResolutionReceiver:\n+ def __init__(self, resolutionReceiver, hostName):\n+ self.resolutionReceiver = resolutionReceiver\n+ self.hostName = hostName\n+ self.addresses = []\n+\n+ def resolutionBegan(self, resolution):\n+ self.resolutionReceiver.resolutionBegan(resolution)\n+ self.resolution = resolution\n+\n+ def addressResolved(self, address):\n+ self.resolutionReceiver.addressResolved(address)\n+ self.addresses.append(address)\n+\n+ def resolutionComplete(self):\n+ self.resolutionReceiver.resolutionComplete()\n+ if self.addresses:\n+ dnscache[self.hostName] = self.addresses\n+\n+\n @implementer(IHostnameResolver)\n class CachingHostnameResolver:\n \"\"\"\n@@ -73,33 +103,22 @@\n def install_on_reactor(self):\n self.reactor.installNameResolver(self)\n \n- def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,\n- addressTypes=None, transportSemantics='TCP'):\n-\n- @provider(IResolutionReceiver)\n- class CachingResolutionReceiver(resolutionReceiver):\n-\n- def resolutionBegan(self, resolution):\n- super().resolutionBegan(resolution)\n- self.resolution = resolution\n- self.resolved = False\n-\n- def addressResolved(self, address):\n- super().addressResolved(address)\n- self.resolved = True\n-\n- def resolutionComplete(self):\n- super().resolutionComplete()\n- if self.resolved:\n- dnscache[hostName] = self.resolution\n-\n+ def resolveHostName(\n+ self, resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics=\"TCP\"\n+ ):\n try:\n- return dnscache[hostName]\n+ addresses = dnscache[hostName]\n except KeyError:\n return self.original_resolver.resolveHostName(\n- CachingResolutionReceiver(),\n+ _CachingResolutionReceiver(resolutionReceiver, hostName),\n hostName,\n portNumber,\n addressTypes,\n- transportSemantics\n+ transportSemantics,\n )\n+ else:\n+ resolutionReceiver.resolutionBegan(HostResolution(hostName))\n+ for addr in addresses:\n+ resolutionReceiver.addressResolved(addr)\n+ resolutionReceiver.resolutionComplete()\n+ return resolutionReceiver\n", "issue": "CachingHostnameResolver does not work with reactor.resolve()\n### Description\r\n\r\nHi. Thank you for maintaining this awesome software :)\r\n\r\nI am working on a project using scrapy that implements a custom downloader class ([link](https://github.com/michael-lazar/mozz-archiver/blob/master/mozz_archiver/downloaders.py)).\r\n\r\nI want to resolve IPv6 addresses, and I found the section in the documentation about the ``DNS_RESOLVER`` setting that was added in #4227. 
I tried enabling the new ``DNS_RESOLVER = \"scrapy.resolver.CachingHostnameResolver\"`` and was immediately greeted with this exception.\r\n\r\n```\r\nUnhandled Error\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/scrapy/commands/crawl.py\", line 27, in run\r\n self.crawler_process.start()\r\n File \"/usr/local/lib/python3.8/site-packages/scrapy/crawler.py\", line 327, in start\r\n reactor.run(installSignalHandlers=False) # blocking call\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/base.py\", line 1283, in run\r\n self.mainLoop()\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/base.py\", line 1292, in mainLoop\r\n self.runUntilCurrent()\r\n--- <exception caught here> ---\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/base.py\", line 913, in runUntilCurrent\r\n call.func(*call.args, **call.kw)\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/tcp.py\", line 449, in resolveAddress\r\n d = self.reactor.resolve(self.addr[0])\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/base.py\", line 638, in resolve\r\n return self.resolver.getHostByName(name, timeout)\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/_resolver.py\", line 277, in getHostByName\r\n self._nameResolver.resolveHostName(FirstOneWins(result), name, 0,\r\n File \"/usr/local/lib/python3.8/site-packages/scrapy/resolver.py\", line 80, in resolveHostName\r\n class CachingResolutionReceiver(resolutionReceiver):\r\nbuiltins.TypeError: __init__() takes 2 positional arguments but 4 were given\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\nThis is also reproducible using the bundled FTP downloader\r\n\r\n1. ``scrapy startproject scrapy_test``\r\n2. ``scrapy genspider example mozz.us``\r\n3. Add ``DNS_RESOLVER = \"scrapy.resolver.CachingHostnameResolver\"`` to the settings file\r\n4. Change the spider start_url to ``ftp://mozz.us``\r\n5. ``scrapy crawl scrapy_test``\r\n\r\n### Versions\r\n\r\n```\r\nScrapy : 2.3.0\r\nlxml : 4.5.2.0\r\nlibxml2 : 2.9.10\r\ncssselect : 1.1.0\r\nparsel : 1.6.0\r\nw3lib : 1.22.0\r\nTwisted : 20.3.0\r\nPython : 3.8.5 (default, Jul 21 2020, 10:48:26) - [Clang 11.0.3 (clang-1103.0.32.62)]\r\npyOpenSSL : 19.1.0 (OpenSSL 1.1.1g 21 Apr 2020)\r\ncryptography : 3.0\r\nPlatform : macOS-10.15.6-x86_64-i386-64bit\r\n```\r\n\r\n### Additional context\r\n\r\nThis was a tricky one to debug because everything works as expected with the HTTP Agent downloader. This issue only appears when you implement a downloader that depends on calling ``reactor.resolve()`` directly without using ``twisted.internet.endpoints.HostnameEndpoint``.\r\n\r\nI discovered that in the twisted [IHostnameResolver](https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IHostnameResolver.html) interface, the ``resolutionReceiver`` method argument is expected to be an *instance* of a resolution receiver class, and not a *type* of a resolution receiver class. 
So I believe the scrapy code below is incorrect:\r\n\r\nhttps://github.com/scrapy/scrapy/blob/5e997587d9b13344a0afa9bb4cf781829a66ce23/scrapy/resolver.py#L76-L80\r\n\r\nThe subclass here only works with the Scrapy Agent because the ``HostnameEndpoint`` does this weird thing where it defines a class with only static methods, so it can pass the class itself instead of instantiating it.\r\n\r\nhttps://github.com/twisted/twisted/blob/22f949f7ce187513f0c218b73186c8a73baa00b4/src/twisted/internet/endpoints.py#L942-L958\r\n\r\n```python\r\n @provider(IResolutionReceiver)\r\n class EndpointReceiver:\r\n @staticmethod\r\n def resolutionBegan(resolutionInProgress):\r\n pass\r\n\r\n @staticmethod\r\n def addressResolved(address):\r\n addresses.append(address)\r\n\r\n @staticmethod\r\n def resolutionComplete():\r\n d.callback(addresses)\r\n\r\n self._nameResolver.resolveHostName(\r\n EndpointReceiver, self._hostText, portNumber=self._port\r\n )\r\n```\r\n\r\nHowever, there are other places in the twisted reactor where twisted does pass an object instance directly to this method.\r\n\r\nhttps://github.com/twisted/twisted/blob/7e3ce790ca9f004ab386f9ecbba8f505d66cd3bd/src/twisted/internet/_resolver.py#L307\r\n\r\n```python\r\n result = Deferred()\r\n self._nameResolver.resolveHostName(FirstOneWins(result), name, 0, [IPv4Address])\r\n return result\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "from twisted.internet import defer\nfrom twisted.internet.base import ThreadedResolver\nfrom twisted.internet.interfaces import IHostnameResolver, IResolutionReceiver, IResolverSimple\nfrom zope.interface.declarations import implementer, provider\n\nfrom scrapy.utils.datatypes import LocalCache\n\n\n# TODO: cache misses\ndnscache = LocalCache(10000)\n\n\n@implementer(IResolverSimple)\nclass CachingThreadedResolver(ThreadedResolver):\n \"\"\"\n Default caching resolver. IPv4 only, supports setting a timeout value for DNS requests.\n \"\"\"\n\n def __init__(self, reactor, cache_size, timeout):\n super().__init__(reactor)\n dnscache.limit = cache_size\n self.timeout = timeout\n\n @classmethod\n def from_crawler(cls, crawler, reactor):\n if crawler.settings.getbool('DNSCACHE_ENABLED'):\n cache_size = crawler.settings.getint('DNSCACHE_SIZE')\n else:\n cache_size = 0\n return cls(reactor, cache_size, crawler.settings.getfloat('DNS_TIMEOUT'))\n\n def install_on_reactor(self):\n self.reactor.installResolver(self)\n\n def getHostByName(self, name, timeout=None):\n if name in dnscache:\n return defer.succeed(dnscache[name])\n # in Twisted<=16.6, getHostByName() is always called with\n # a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple),\n # so the input argument above is simply overridden\n # to enforce Scrapy's DNS_TIMEOUT setting's value\n timeout = (self.timeout,)\n d = super().getHostByName(name, timeout)\n if dnscache.limit:\n d.addCallback(self._cache_result, name)\n return d\n\n def _cache_result(self, result, name):\n dnscache[name] = result\n return result\n\n\n@implementer(IHostnameResolver)\nclass CachingHostnameResolver:\n \"\"\"\n Experimental caching resolver. 
Resolves IPv4 and IPv6 addresses,\n does not support setting a timeout value for DNS requests.\n \"\"\"\n\n def __init__(self, reactor, cache_size):\n self.reactor = reactor\n self.original_resolver = reactor.nameResolver\n dnscache.limit = cache_size\n\n @classmethod\n def from_crawler(cls, crawler, reactor):\n if crawler.settings.getbool('DNSCACHE_ENABLED'):\n cache_size = crawler.settings.getint('DNSCACHE_SIZE')\n else:\n cache_size = 0\n return cls(reactor, cache_size)\n\n def install_on_reactor(self):\n self.reactor.installNameResolver(self)\n\n def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,\n addressTypes=None, transportSemantics='TCP'):\n\n @provider(IResolutionReceiver)\n class CachingResolutionReceiver(resolutionReceiver):\n\n def resolutionBegan(self, resolution):\n super().resolutionBegan(resolution)\n self.resolution = resolution\n self.resolved = False\n\n def addressResolved(self, address):\n super().addressResolved(address)\n self.resolved = True\n\n def resolutionComplete(self):\n super().resolutionComplete()\n if self.resolved:\n dnscache[hostName] = self.resolution\n\n try:\n return dnscache[hostName]\n except KeyError:\n return self.original_resolver.resolveHostName(\n CachingResolutionReceiver(),\n hostName,\n portNumber,\n addressTypes,\n transportSemantics\n )\n", "path": "scrapy/resolver.py"}]}
2,850
681
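The key point in the record above is that `IHostnameResolver.resolveHostName()` may receive either a class that provides `IResolutionReceiver` through static methods (as `HostnameEndpoint` does) or a plain instance (as `FirstOneWins(result)` is), so the resolver must delegate to whatever it is handed instead of subclassing it. The sketch below illustrates that contract with a small forwarding wrapper in the spirit of the `_CachingResolutionReceiver` added by the golden diff; the class names and print statements are ours, and it exercises the wrapper directly rather than through a reactor.

```python
from twisted.internet.interfaces import IResolutionReceiver
from zope.interface import implementer, provider


class ForwardingReceiver:
    """Wraps whatever receiver it is given and forwards the three callbacks."""

    def __init__(self, receiver):
        self._receiver = receiver
        self.addresses = []

    def resolutionBegan(self, resolution):
        self._receiver.resolutionBegan(resolution)

    def addressResolved(self, address):
        self.addresses.append(address)
        self._receiver.addressResolved(address)

    def resolutionComplete(self):
        self._receiver.resolutionComplete()


@provider(IResolutionReceiver)
class ClassStyleReceiver:
    """The shape HostnameEndpoint passes: the class object itself."""

    @staticmethod
    def resolutionBegan(resolution):
        pass

    @staticmethod
    def addressResolved(address):
        print("class style resolved:", address)

    @staticmethod
    def resolutionComplete():
        pass


@implementer(IResolutionReceiver)
class InstanceStyleReceiver:
    """The shape reactor.resolve()-style callers pass: an instance."""

    def resolutionBegan(self, resolution):
        pass

    def addressResolved(self, address):
        print("instance style resolved:", address)

    def resolutionComplete(self):
        pass


for receiver in (ClassStyleReceiver, InstanceStyleReceiver()):
    wrapper = ForwardingReceiver(receiver)
    wrapper.resolutionBegan(None)          # a real resolver passes an IHostResolution
    wrapper.addressResolved("192.0.2.1")   # a real resolver passes an IAddress
    wrapper.resolutionComplete()
    print("collected:", wrapper.addresses)
```

Composition works for both shapes because nothing in the wrapper depends on the receiver being a class or an instance, only on it exposing the three callback methods.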
gh_patches_debug_17125
rasdani/github-patches
git_diff
spack__spack-18458
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installation issue: r-boot <!-- Thanks for taking the time to report this build failure. To proceed with the report please: 1. Title the issue "Installation issue: <name-of-the-package>". 2. Provide the information required below. We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! --> ### Steps to reproduce the issue <!-- Fill in the exact spec you are trying to build and the relevant part of the error message --> ```console $ spack install r-boot%fj ==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f ``` Build of `r-boot%fj` on 2020 Aug has a checksum error. This version added to spack at 31 Aug 2019. Please see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd According to our log, same build succeeded on 2019 Oct. https://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz seems to be changed between these attempts. We found old(30c89e19) boot_1.3-23.tar.gz from http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz and compared with new(79236a5a) one. Difference was tribial. ("Date/Publication" in boot/DESCRIPTION, and MD5 of the file in boot/MD5) So I would like to update checksum value. We have another question. In this case, we found "old" archive and proof the differnce is trivial. If we found checksum mismatch and could not find "old" archive to verify, which is better in view of security? 1. create issue and discuss 2. directly make PR <!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. --> ### General information <!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. --> - [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform - [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers - [ ] I have uploaded the build log and environment files - [x] I have searched the issues of this repo and believe this is not a duplicate </issue> <code> [start of var/spack/repos/builtin/packages/r-boot/package.py] 1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class RBoot(RPackage): 10 """Functions and datasets for bootstrapping from the book "Bootstrap 11 Methods and Their Application" by A. C. Davison and D. V. 
Hinkley (1997, 12 CUP), originally written by Angelo Canty for S.""" 13 14 homepage = "https://cloud.r-project.org/package=boot" 15 url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz" 16 list_url = "https://cloud.r-project.org/src/contrib/Archive/boot" 17 18 version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f') 19 version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc') 20 version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a') 21 version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e') 22 23 depends_on('[email protected]:', type=('build', 'run')) 24 [end of var/spack/repos/builtin/packages/r-boot/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py --- a/var/spack/repos/builtin/packages/r-boot/package.py +++ b/var/spack/repos/builtin/packages/r-boot/package.py @@ -15,7 +15,7 @@ url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/boot" - version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f') + version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f') version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc') version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a') version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py\n--- a/var/spack/repos/builtin/packages/r-boot/package.py\n+++ b/var/spack/repos/builtin/packages/r-boot/package.py\n@@ -15,7 +15,7 @@\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n \n- version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n+ version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n", "issue": "Installation issue: r-boot\n<!-- Thanks for taking the time to report this build failure. To proceed with the report please:\r\n\r\n1. Title the issue \"Installation issue: <name-of-the-package>\".\r\n2. Provide the information required below.\r\n\r\nWe encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->\r\n\r\n### Steps to reproduce the issue\r\n\r\n<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->\r\n```console\r\n$ spack install r-boot%fj\r\n==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz\r\n Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f\r\n```\r\n\r\nBuild of `r-boot%fj` on 2020 Aug has a checksum error.\r\nThis version added to spack at 31 Aug 2019. Please see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd\r\nAccording to our log, same build succeeded on 2019 Oct. \r\n\r\nhttps://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz seems to be changed between these attempts.\r\nWe found old(30c89e19) boot_1.3-23.tar.gz from http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz\r\nand compared with new(79236a5a) one.\r\nDifference was tribial. (\"Date/Publication\" in boot/DESCRIPTION, and MD5 of the file in boot/MD5)\r\nSo I would like to update checksum value.\r\n\r\nWe have another question.\r\nIn this case, we found \"old\" archive and proof the differnce is trivial.\r\nIf we found checksum mismatch and could not find \"old\" archive to verify, \r\nwhich is better in view of security?\r\n1. create issue and discuss\r\n2. directly make PR\r\n\r\n<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->\r\n\r\n### General information\r\n\r\n<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. 
-->\r\n- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [ ] I have uploaded the build log and environment files\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RBoot(RPackage):\n \"\"\"Functions and datasets for bootstrapping from the book \"Bootstrap\n Methods and Their Application\" by A. C. Davison and D. V. Hinkley (1997,\n CUP), originally written by Angelo Canty for S.\"\"\"\n\n homepage = \"https://cloud.r-project.org/package=boot\"\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n\n version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n\n depends_on('[email protected]:', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/r-boot/package.py"}]}
1,833
471
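The fix in the record above is simply a new `sha256` value after upstream re-rolled `boot_1.3-23.tar.gz`. The reporter's question about how to verify such a mismatch can be answered locally: Spack can regenerate the value with `spack checksum r-boot`, or the digest can be computed directly as in the sketch below. The tarball path is a placeholder, and the expected value is the updated one from the golden diff.

```python
import hashlib
from pathlib import Path

# Updated value from the golden diff for boot_1.3-23.tar.gz.
RECORDED_SHA256 = "79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f"


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through hashlib so large tarballs need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


tarball = Path("boot_1.3-23.tar.gz")  # placeholder path to the downloaded archive
if tarball.exists():
    actual = sha256_of(tarball)
    print("match" if actual == RECORDED_SHA256 else f"mismatch: {actual}")
```

Comparing digests of the old and new archives, when both can still be found, is what lets a reviewer judge whether the difference is trivial, as the reporter did before proposing the checksum update.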
gh_patches_debug_19842
rasdani/github-patches
git_diff
ietf-tools__datatracker-5726
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Don't crash when a nomcom is partially set up. Right now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes. The places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects are done manually). See also https://github.com/ietf-tools/datatracker/issues/3289 </issue> <code> [start of ietf/group/templatetags/group_filters.py] 1 from django import template 2 3 import debug # pyflakes:ignore 4 5 from ietf.group.models import Group 6 7 register = template.Library() 8 9 @register.filter 10 def has_sessions(group,num): 11 return group.session_set.filter(meeting__number=num).exists() 12 13 @register.filter 14 def active_roles(queryset): 15 return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat') 16 17 @register.filter 18 def active_nomcoms(user): 19 if not (user and hasattr(user, "is_authenticated") and user.is_authenticated): 20 return [] 21 22 groups = [] 23 24 groups.extend(Group.objects.filter( 25 role__person__user=user, 26 type_id='nomcom', 27 state__slug='active').distinct().select_related("type")) 28 29 return groups 30 31 @register.inclusion_tag('person/person_link.html') 32 def role_person_link(role, **kwargs): 33 title = kwargs.get('title', '') 34 cls = kwargs.get('class', '') 35 name = role.person.name 36 plain_name = role.person.plain_name() 37 email = role.email.address 38 return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls} 39 [end of ietf/group/templatetags/group_filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py --- a/ietf/group/templatetags/group_filters.py +++ b/ietf/group/templatetags/group_filters.py @@ -2,7 +2,7 @@ import debug # pyflakes:ignore -from ietf.group.models import Group +from ietf.nomcom.models import NomCom register = template.Library() @@ -19,14 +19,15 @@ if not (user and hasattr(user, "is_authenticated") and user.is_authenticated): return [] - groups = [] - - groups.extend(Group.objects.filter( - role__person__user=user, - type_id='nomcom', - state__slug='active').distinct().select_related("type")) - - return groups + return list( + NomCom.objects.filter( + group__role__person__user=user, + group__type_id='nomcom', # just in case... + group__state__slug='active', + ) + .distinct() + .order_by("group__acronym") + ) @register.inclusion_tag('person/person_link.html') def role_person_link(role, **kwargs):
{"golden_diff": "diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py\n--- a/ietf/group/templatetags/group_filters.py\n+++ b/ietf/group/templatetags/group_filters.py\n@@ -2,7 +2,7 @@\n \n import debug # pyflakes:ignore\n \n-from ietf.group.models import Group\n+from ietf.nomcom.models import NomCom\n \n register = template.Library()\n \n@@ -19,14 +19,15 @@\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n \n- groups = []\n-\n- groups.extend(Group.objects.filter(\n- role__person__user=user,\n- type_id='nomcom',\n- state__slug='active').distinct().select_related(\"type\"))\n-\n- return groups\n+ return list(\n+ NomCom.objects.filter(\n+ group__role__person__user=user,\n+ group__type_id='nomcom', # just in case...\n+ group__state__slug='active',\n+ )\n+ .distinct()\n+ .order_by(\"group__acronym\")\n+ )\n \n @register.inclusion_tag('person/person_link.html')\n def role_person_link(role, **kwargs):\n", "issue": "Don't crash when a nomcom is partially set up.\nRight now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes.\r\n\r\nThe places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects are done manually).\r\n\r\nSee also https://github.com/ietf-tools/datatracker/issues/3289\n", "before_files": [{"content": "from django import template\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\[email protected]\ndef has_sessions(group,num):\n return group.session_set.filter(meeting__number=num).exists()\n\[email protected]\ndef active_roles(queryset):\n return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')\n \[email protected]\ndef active_nomcoms(user):\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n\n groups = []\n\n groups.extend(Group.objects.filter(\n role__person__user=user,\n type_id='nomcom',\n state__slug='active').distinct().select_related(\"type\"))\n\n return groups\n\[email protected]_tag('person/person_link.html')\ndef role_person_link(role, **kwargs):\n title = kwargs.get('title', '')\n cls = kwargs.get('class', '')\n name = role.person.name\n plain_name = role.person.plain_name()\n email = role.email.address\n return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}\n", "path": "ietf/group/templatetags/group_filters.py"}]}
num_tokens_prompt: 987
num_tokens_diff: 284
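The record above changes the `active_nomcoms` template filter to query `NomCom` rows instead of bare nomcom `Group` rows. A rough sketch of the kind of regression test that change enables is below; `make_nomcom_group` and `user_with_role` are hypothetical fixtures standing in for whatever test helpers the project uses, so treat this as an illustration rather than datatracker code.

```python
from ietf.group.templatetags.group_filters import active_nomcoms


def test_partially_set_up_nomcom_is_skipped(make_nomcom_group, user_with_role):
    # A Group of type "nomcom" exists and the user holds a role in it,
    # but the matching NomCom object was never created.
    group = make_nomcom_group(acronym="nomcom2023")
    user = user_with_role(group, "member")

    # The old filter returned the half-configured Group, and later menu code
    # that expects a NomCom crashed. The patched filter queries NomCom
    # directly, so the entry is simply absent.
    assert list(active_nomcoms(user)) == []
```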
problem_id: gh_patches_debug_26878
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__torchmetrics-2061
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> State Changes from List to Tensor After Calling Compute() ## 🐛 Bug I was logging a custom Metric result using the logger provided by LightningModule. The codes runs on 2 GPUs with ddp. While a list state whose `dist_reduce_fx` is "cat" was synchronized by calling `compute()` , I realized the behavior in `training_step()` and `validation_step()` were different. The state in `training_step()` where on_step=True and on_epoch=False (default setting for training) after calling `compute()` was always a list. However, in `validation_step()` where on_step=False and on_epoch=True (default setting for validation), the state became a Tensor. Such behavior is not explained in the doc. ### To Reproduce <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> <details> <summary>Metric and Logging Results</summary> **Codes** ```python import logging import torch from torchmetrics import Metric from torchmetrics.functional.retrieval import retrieval_normalized_dcg class MyMetric(Metric): def __init__(self, top_k): super().__init__() self.top_k = top_k self.add_state("state_1", default=[], dist_reduce_fx="cat") def update(self, preds: Tensor, target: Tensor): assert preds.shape == target.shape self.state_1 += [self._metric(p, t) for p, t in zip(preds, target)] def compute(self): if isinstance(self.state_1, list): logging.warning(f"self.state_1.size(): {len(self.state_1)}") return torch.stack(self.state_1).mean() logging.warning(f"self.state_1.size(): {self.state_1.size()}") return self.state_1.mean() def _metric(self, preds: Tensor, target: Tensor): return retrieval_normalized_dcg(preds, target, k=self.top_k).float() ``` **Log Outputs** Training: `WARNING:self.state_1.size(): 40` Validation: `WARNING:self.state_1.size(): torch.Size([80])` </details> ### Expected behavior The state is always what it is defined in add_state(), i.e., a list. ### Environment - TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): 0.10.3 (conda-forge) - Python & PyTorch Version (e.g., 1.0): 3.10.12 - Any other relevant information such as OS (e.g., Linux): pytorch: 1.13.1 pytorch-lightning: 1.9.4 Linux: 5.15.0-78-generic ### Additional context <!-- Add any other context about the problem here. --> </issue> <code> [start of src/torchmetrics/utilities/__init__.py] 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 from torchmetrics.utilities.checks import check_forward_full_state_property 15 from torchmetrics.utilities.distributed import class_reduce, reduce 16 from torchmetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn 17 18 __all__ = [ 19 "check_forward_full_state_property", 20 "class_reduce", 21 "reduce", 22 "rank_zero_debug", 23 "rank_zero_info", 24 "rank_zero_warn", 25 ] 26 [end of src/torchmetrics/utilities/__init__.py] [start of src/torchmetrics/utilities/distributed.py] 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Any, List, Optional 15 16 import torch 17 from torch import Tensor 18 from torch.nn import functional as F # noqa: N812 19 from typing_extensions import Literal 20 21 22 def reduce(x: Tensor, reduction: Literal["elementwise_mean", "sum", "none", None]) -> Tensor: 23 """Reduces a given tensor by a given reduction method. 24 25 Args: 26 x: the tensor, which shall be reduced 27 reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum') 28 29 Return: 30 reduced Tensor 31 32 Raise: 33 ValueError if an invalid reduction parameter was given 34 35 """ 36 if reduction == "elementwise_mean": 37 return torch.mean(x) 38 if reduction == "none" or reduction is None: 39 return x 40 if reduction == "sum": 41 return torch.sum(x) 42 raise ValueError("Reduction parameter unknown.") 43 44 45 def class_reduce( 46 num: Tensor, 47 denom: Tensor, 48 weights: Tensor, 49 class_reduction: Literal["micro", "macro", "weighted", "none", None] = "none", 50 ) -> Tensor: 51 """Reduce classification metrics of the form ``num / denom * weights``. 52 53 For example for calculating standard accuracy the num would be number of true positives per class, denom would be 54 the support per class, and weights would be a tensor of 1s. 55 56 Args: 57 num: numerator tensor 58 denom: denominator tensor 59 weights: weights for each class 60 class_reduction: reduction method for multiclass problems: 61 62 - ``'micro'``: calculate metrics globally (default) 63 - ``'macro'``: calculate metrics for each label, and find their unweighted mean. 64 - ``'weighted'``: calculate metrics for each label, and find their weighted mean. 65 - ``'none'`` or ``None``: returns calculated metric per class 66 67 Raises: 68 ValueError: 69 If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``. 
70 71 """ 72 valid_reduction = ("micro", "macro", "weighted", "none", None) 73 fraction = torch.sum(num) / torch.sum(denom) if class_reduction == "micro" else num / denom 74 75 # We need to take care of instances where the denom can be 0 76 # for some (or all) classes which will produce nans 77 fraction[fraction != fraction] = 0 78 79 if class_reduction == "micro": 80 return fraction 81 if class_reduction == "macro": 82 return torch.mean(fraction) 83 if class_reduction == "weighted": 84 return torch.sum(fraction * (weights.float() / torch.sum(weights))) 85 if class_reduction == "none" or class_reduction is None: 86 return fraction 87 88 raise ValueError(f"Reduction parameter {class_reduction} unknown. Choose between one of these: {valid_reduction}") 89 90 91 def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]: 92 gathered_result = [torch.zeros_like(result) for _ in range(world_size)] 93 torch.distributed.all_gather(gathered_result, result, group) 94 return gathered_result 95 96 97 def gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]: 98 """Gather all tensors from several ddp processes onto a list that is broadcasted to all processes. 99 100 Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case 101 tensors are padded, gathered and then trimmed to secure equal workload for all processes. 102 103 Args: 104 result: the value to sync 105 group: the process group to gather results from. Defaults to all processes (world) 106 107 Return: 108 gathered_result: list with size equal to the process group where 109 ``gathered_result[i]`` corresponds to result tensor from process ``i`` 110 111 """ 112 if group is None: 113 group = torch.distributed.group.WORLD 114 115 # convert tensors to contiguous format 116 result = result.contiguous() 117 118 world_size = torch.distributed.get_world_size(group) 119 torch.distributed.barrier(group=group) 120 121 # if the tensor is scalar, things are easy 122 if result.ndim == 0: 123 return _simple_gather_all_tensors(result, group, world_size) 124 125 # 1. Gather sizes of all tensors 126 local_size = torch.tensor(result.shape, device=result.device) 127 local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)] 128 torch.distributed.all_gather(local_sizes, local_size, group=group) 129 max_size = torch.stack(local_sizes).max(dim=0).values 130 all_sizes_equal = all(all(ls == max_size) for ls in local_sizes) 131 132 # 2. If shapes are all the same, then do a simple gather: 133 if all_sizes_equal: 134 return _simple_gather_all_tensors(result, group, world_size) 135 136 # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate 137 pad_dims = [] 138 pad_by = (max_size - local_size).detach().cpu() 139 for val in reversed(pad_by): 140 pad_dims.append(0) 141 pad_dims.append(val.item()) 142 result_padded = F.pad(result, pad_dims) 143 gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)] 144 torch.distributed.all_gather(gathered_result, result_padded, group) 145 for idx, item_size in enumerate(local_sizes): 146 slice_param = [slice(dim_size) for dim_size in item_size] 147 gathered_result[idx] = gathered_result[idx][slice_param] 148 return gathered_result 149 [end of src/torchmetrics/utilities/distributed.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/torchmetrics/utilities/__init__.py b/src/torchmetrics/utilities/__init__.py --- a/src/torchmetrics/utilities/__init__.py +++ b/src/torchmetrics/utilities/__init__.py @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. from torchmetrics.utilities.checks import check_forward_full_state_property +from torchmetrics.utilities.data import ( + dim_zero_cat, + dim_zero_max, + dim_zero_mean, + dim_zero_min, + dim_zero_sum, +) from torchmetrics.utilities.distributed import class_reduce, reduce from torchmetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn @@ -22,4 +29,9 @@ "rank_zero_debug", "rank_zero_info", "rank_zero_warn", + "dim_zero_cat", + "dim_zero_max", + "dim_zero_mean", + "dim_zero_min", + "dim_zero_sum", ] diff --git a/src/torchmetrics/utilities/distributed.py b/src/torchmetrics/utilities/distributed.py --- a/src/torchmetrics/utilities/distributed.py +++ b/src/torchmetrics/utilities/distributed.py @@ -105,8 +105,7 @@ group: the process group to gather results from. Defaults to all processes (world) Return: - gathered_result: list with size equal to the process group where - ``gathered_result[i]`` corresponds to result tensor from process ``i`` + list with size equal to the process group where element i corresponds to result tensor from process i """ if group is None:
{"golden_diff": "diff --git a/src/torchmetrics/utilities/__init__.py b/src/torchmetrics/utilities/__init__.py\n--- a/src/torchmetrics/utilities/__init__.py\n+++ b/src/torchmetrics/utilities/__init__.py\n@@ -12,6 +12,13 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n from torchmetrics.utilities.checks import check_forward_full_state_property\n+from torchmetrics.utilities.data import (\n+ dim_zero_cat,\n+ dim_zero_max,\n+ dim_zero_mean,\n+ dim_zero_min,\n+ dim_zero_sum,\n+)\n from torchmetrics.utilities.distributed import class_reduce, reduce\n from torchmetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn\n \n@@ -22,4 +29,9 @@\n \"rank_zero_debug\",\n \"rank_zero_info\",\n \"rank_zero_warn\",\n+ \"dim_zero_cat\",\n+ \"dim_zero_max\",\n+ \"dim_zero_mean\",\n+ \"dim_zero_min\",\n+ \"dim_zero_sum\",\n ]\ndiff --git a/src/torchmetrics/utilities/distributed.py b/src/torchmetrics/utilities/distributed.py\n--- a/src/torchmetrics/utilities/distributed.py\n+++ b/src/torchmetrics/utilities/distributed.py\n@@ -105,8 +105,7 @@\n group: the process group to gather results from. Defaults to all processes (world)\n \n Return:\n- gathered_result: list with size equal to the process group where\n- ``gathered_result[i]`` corresponds to result tensor from process ``i``\n+ list with size equal to the process group where element i corresponds to result tensor from process i\n \n \"\"\"\n if group is None:\n", "issue": "State Changes from List to Tensor After Calling Compute()\n## \ud83d\udc1b Bug\r\n\r\nI was logging a custom Metric result using the logger provided by LightningModule. The codes runs on 2 GPUs with ddp. While a list state whose `dist_reduce_fx` is \"cat\" was synchronized by calling `compute()` , I realized the behavior in `training_step()` and `validation_step()` were different.\r\n\r\nThe state in `training_step()` where on_step=True and on_epoch=False (default setting for training) after calling `compute()` was always a list. However, in `validation_step()` where on_step=False and on_epoch=True (default setting for validation), the state became a Tensor. 
Such behavior is not explained in the doc.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Metric and Logging Results</summary>\r\n\r\n**Codes**\r\n```python\r\nimport logging\r\n\r\nimport torch\r\nfrom torchmetrics import Metric\r\nfrom torchmetrics.functional.retrieval import retrieval_normalized_dcg\r\n\r\n\r\nclass MyMetric(Metric):\r\n def __init__(self, top_k):\r\n super().__init__()\r\n self.top_k = top_k\r\n self.add_state(\"state_1\", default=[], dist_reduce_fx=\"cat\")\r\n\r\n def update(self, preds: Tensor, target: Tensor):\r\n assert preds.shape == target.shape\r\n self.state_1 += [self._metric(p, t) for p, t in zip(preds, target)]\r\n\r\n def compute(self):\r\n if isinstance(self.state_1, list):\r\n logging.warning(f\"self.state_1.size(): {len(self.state_1)}\")\r\n return torch.stack(self.state_1).mean()\r\n logging.warning(f\"self.state_1.size(): {self.state_1.size()}\")\r\n return self.state_1.mean()\r\n\r\n def _metric(self, preds: Tensor, target: Tensor):\r\n return retrieval_normalized_dcg(preds, target, k=self.top_k).float()\r\n```\r\n\r\n**Log Outputs**\r\nTraining:\r\n`WARNING:self.state_1.size(): 40`\r\n\r\nValidation:\r\n`WARNING:self.state_1.size(): torch.Size([80])`\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\nThe state is always what it is defined in add_state(), i.e., a list.\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):\r\n0.10.3 (conda-forge)\r\n- Python & PyTorch Version (e.g., 1.0):\r\n3.10.12\r\n- Any other relevant information such as OS (e.g., Linux):\r\npytorch: 1.13.1\r\npytorch-lightning: 1.9.4\r\nLinux: 5.15.0-78-generic\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torchmetrics.utilities.checks import check_forward_full_state_property\nfrom torchmetrics.utilities.distributed import class_reduce, reduce\nfrom torchmetrics.utilities.prints import rank_zero_debug, rank_zero_info, rank_zero_warn\n\n__all__ = [\n \"check_forward_full_state_property\",\n \"class_reduce\",\n \"reduce\",\n \"rank_zero_debug\",\n \"rank_zero_info\",\n \"rank_zero_warn\",\n]\n", "path": "src/torchmetrics/utilities/__init__.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F # noqa: N812\nfrom typing_extensions import Literal\n\n\ndef reduce(x: Tensor, reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None]) -> Tensor:\n \"\"\"Reduces a given tensor by a given reduction method.\n\n Args:\n x: the tensor, which shall be reduced\n reduction: a string specifying the reduction method ('elementwise_mean', 'none', 'sum')\n\n Return:\n reduced Tensor\n\n Raise:\n ValueError if an invalid reduction parameter was given\n\n \"\"\"\n if reduction == \"elementwise_mean\":\n return torch.mean(x)\n if reduction == \"none\" or reduction is None:\n return x\n if reduction == \"sum\":\n return torch.sum(x)\n raise ValueError(\"Reduction parameter unknown.\")\n\n\ndef class_reduce(\n num: Tensor,\n denom: Tensor,\n weights: Tensor,\n class_reduction: Literal[\"micro\", \"macro\", \"weighted\", \"none\", None] = \"none\",\n) -> Tensor:\n \"\"\"Reduce classification metrics of the form ``num / denom * weights``.\n\n For example for calculating standard accuracy the num would be number of true positives per class, denom would be\n the support per class, and weights would be a tensor of 1s.\n\n Args:\n num: numerator tensor\n denom: denominator tensor\n weights: weights for each class\n class_reduction: reduction method for multiclass problems:\n\n - ``'micro'``: calculate metrics globally (default)\n - ``'macro'``: calculate metrics for each label, and find their unweighted mean.\n - ``'weighted'``: calculate metrics for each label, and find their weighted mean.\n - ``'none'`` or ``None``: returns calculated metric per class\n\n Raises:\n ValueError:\n If ``class_reduction`` is none of ``\"micro\"``, ``\"macro\"``, ``\"weighted\"``, ``\"none\"`` or ``None``.\n\n \"\"\"\n valid_reduction = 
(\"micro\", \"macro\", \"weighted\", \"none\", None)\n fraction = torch.sum(num) / torch.sum(denom) if class_reduction == \"micro\" else num / denom\n\n # We need to take care of instances where the denom can be 0\n # for some (or all) classes which will produce nans\n fraction[fraction != fraction] = 0\n\n if class_reduction == \"micro\":\n return fraction\n if class_reduction == \"macro\":\n return torch.mean(fraction)\n if class_reduction == \"weighted\":\n return torch.sum(fraction * (weights.float() / torch.sum(weights)))\n if class_reduction == \"none\" or class_reduction is None:\n return fraction\n\n raise ValueError(f\"Reduction parameter {class_reduction} unknown. Choose between one of these: {valid_reduction}\")\n\n\ndef _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]:\n gathered_result = [torch.zeros_like(result) for _ in range(world_size)]\n torch.distributed.all_gather(gathered_result, result, group)\n return gathered_result\n\n\ndef gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]:\n \"\"\"Gather all tensors from several ddp processes onto a list that is broadcasted to all processes.\n\n Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case\n tensors are padded, gathered and then trimmed to secure equal workload for all processes.\n\n Args:\n result: the value to sync\n group: the process group to gather results from. Defaults to all processes (world)\n\n Return:\n gathered_result: list with size equal to the process group where\n ``gathered_result[i]`` corresponds to result tensor from process ``i``\n\n \"\"\"\n if group is None:\n group = torch.distributed.group.WORLD\n\n # convert tensors to contiguous format\n result = result.contiguous()\n\n world_size = torch.distributed.get_world_size(group)\n torch.distributed.barrier(group=group)\n\n # if the tensor is scalar, things are easy\n if result.ndim == 0:\n return _simple_gather_all_tensors(result, group, world_size)\n\n # 1. Gather sizes of all tensors\n local_size = torch.tensor(result.shape, device=result.device)\n local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]\n torch.distributed.all_gather(local_sizes, local_size, group=group)\n max_size = torch.stack(local_sizes).max(dim=0).values\n all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)\n\n # 2. If shapes are all the same, then do a simple gather:\n if all_sizes_equal:\n return _simple_gather_all_tensors(result, group, world_size)\n\n # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate\n pad_dims = []\n pad_by = (max_size - local_size).detach().cpu()\n for val in reversed(pad_by):\n pad_dims.append(0)\n pad_dims.append(val.item())\n result_padded = F.pad(result, pad_dims)\n gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]\n torch.distributed.all_gather(gathered_result, result_padded, group)\n for idx, item_size in enumerate(local_sizes):\n slice_param = [slice(dim_size) for dim_size in item_size]\n gathered_result[idx] = gathered_result[idx][slice_param]\n return gathered_result\n", "path": "src/torchmetrics/utilities/distributed.py"}]}
num_tokens_prompt: 3,140
num_tokens_diff: 379
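The torchmetrics diff above mostly re-exports the `dim_zero_*` helpers, and `dim_zero_cat` is the piece that addresses the confusion in the issue: a state registered with `dist_reduce_fx="cat"` is a Python list before synchronisation and a concatenated tensor after it. Below is a minimal sketch of a custom metric whose `compute()` is written against `dim_zero_cat` so it works in both situations; the metric itself is invented for illustration.

```python
import torch
from torchmetrics import Metric
from torchmetrics.utilities import dim_zero_cat  # re-exported by the patch above


class MeanAbsError(Metric):
    def __init__(self):
        super().__init__()
        self.add_state("errors", default=[], dist_reduce_fx="cat")

    def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
        self.errors.append((preds - target).abs().mean())

    def compute(self) -> torch.Tensor:
        # dim_zero_cat accepts either a list of tensors (no sync has happened)
        # or an already concatenated tensor (after the ddp "cat" reduction),
        # so compute() never has to branch on the state's current type.
        return dim_zero_cat(self.errors).mean()
```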
problem_id: gh_patches_debug_2896
source: rasdani/github-patches
task_type: git_diff
in_source_id: secdev__scapy-924
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> scapy import fail: NameError: name 'get_working_if' is not defined Hello, On an Ubuntu 16.04.3 LTS, when launching `scapy` CLI or importing `scapy.route` without any interface in use, I got the following error: ```Python Traceback (most recent call last): File "/usr/local/bin/scapy", line 25, in <module> interact() File "tools/scapy/scapy/main.py", line 421, in interact init_session(session_name, mydict) File "tools/scapy/scapy/main.py", line 293, in init_session scapy_builtins = {k: v for k, v in six.iteritems(importlib.import_module(".all", "scapy").__dict__) if _validate_local(k)} File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module __import__(name) File "tools/scapy/scapy/all.py", line 25, in <module> from scapy.route import * File "tools/scapy/scapy/route.py", line 195, in <module> conf.iface = get_working_if() NameError: name 'get_working_if' is not defined ``` A bisect leads to the recent commit fd50b349263256e0aaa69780937ae02d4f4ee46c, more likely to [this code](https://github.com/secdev/scapy/commit/fd50b349263256e0aaa69780937ae02d4f4ee46c#diff-3fc486d3e1085d11c80c20bab07375f2R194) I'm not sure to correctly understand for this code snippet and how it works on the different OS. This is why I prefer opening an issue :smiley: </issue> <code> [start of scapy/route.py] 1 ## This file is part of Scapy 2 ## See http://www.secdev.org/projects/scapy for more informations 3 ## Copyright (C) Philippe Biondi <[email protected]> 4 ## This program is published under a GPLv2 license 5 6 """ 7 Routing and handling of network interfaces. 8 """ 9 10 from __future__ import absolute_import 11 from scapy.utils import atol, ltoa, itom, pretty_routes 12 from scapy.config import conf 13 from scapy.error import Scapy_Exception, warning 14 from scapy.arch import WINDOWS 15 import scapy.consts 16 import scapy.modules.six as six 17 18 ############################## 19 ## Routing/Interfaces stuff ## 20 ############################## 21 22 class Route: 23 def __init__(self): 24 self.resync() 25 self.cache = {} 26 27 def invalidate_cache(self): 28 self.cache = {} 29 30 def resync(self): 31 from scapy.arch import read_routes 32 self.invalidate_cache() 33 self.routes = read_routes() 34 35 def __repr__(self): 36 rtlst = [] 37 for net, msk, gw, iface, addr, metric in self.routes: 38 rtlst.append((ltoa(net), 39 ltoa(msk), 40 gw, 41 (iface.name if not isinstance(iface, six.string_types) else iface), 42 addr, 43 str(metric))) 44 45 return pretty_routes(rtlst, 46 [("Network", "Netmask", "Gateway", "Iface", "Output IP", "Metric")]) 47 48 def make_route(self, host=None, net=None, gw=None, dev=None, metric=1): 49 from scapy.arch import get_if_addr 50 if host is not None: 51 thenet,msk = host,32 52 elif net is not None: 53 thenet,msk = net.split("/") 54 msk = int(msk) 55 else: 56 raise Scapy_Exception("make_route: Incorrect parameters. 
You should specify a host or a net") 57 if gw is None: 58 gw="0.0.0.0" 59 if dev is None: 60 if gw: 61 nhop = gw 62 else: 63 nhop = thenet 64 dev,ifaddr,x = self.route(nhop) 65 else: 66 ifaddr = get_if_addr(dev) 67 return (atol(thenet), itom(msk), gw, dev, ifaddr, metric) 68 69 def add(self, *args, **kargs): 70 """Ex: 71 add(net="192.168.1.0/24",gw="1.2.3.4") 72 """ 73 self.invalidate_cache() 74 self.routes.append(self.make_route(*args,**kargs)) 75 76 77 def delt(self, *args, **kargs): 78 """delt(host|net, gw|dev)""" 79 self.invalidate_cache() 80 route = self.make_route(*args,**kargs) 81 try: 82 i=self.routes.index(route) 83 del(self.routes[i]) 84 except ValueError: 85 warning("no matching route found") 86 87 def ifchange(self, iff, addr): 88 self.invalidate_cache() 89 the_addr,the_msk = (addr.split("/")+["32"])[:2] 90 the_msk = itom(int(the_msk)) 91 the_rawaddr = atol(the_addr) 92 the_net = the_rawaddr & the_msk 93 94 95 for i, route in enumerate(self.routes): 96 net, msk, gw, iface, addr, metric = route 97 if WINDOWS: 98 if iff.guid != iface.guid: 99 continue 100 elif iff != iface: 101 continue 102 if gw == '0.0.0.0': 103 self.routes[i] = (the_net,the_msk,gw,iface,the_addr,metric) 104 else: 105 self.routes[i] = (net,msk,gw,iface,the_addr,metric) 106 conf.netcache.flush() 107 108 109 110 def ifdel(self, iff): 111 self.invalidate_cache() 112 new_routes=[] 113 for rt in self.routes: 114 if WINDOWS: 115 if iff.guid == rt[3].guid: 116 continue 117 elif iff == rt[3]: 118 continue 119 new_routes.append(rt) 120 self.routes=new_routes 121 122 def ifadd(self, iff, addr): 123 self.invalidate_cache() 124 the_addr,the_msk = (addr.split("/")+["32"])[:2] 125 the_msk = itom(int(the_msk)) 126 the_rawaddr = atol(the_addr) 127 the_net = the_rawaddr & the_msk 128 self.routes.append((the_net,the_msk,'0.0.0.0',iff,the_addr,1)) 129 130 131 def route(self,dest,verbose=None): 132 if isinstance(dest, list) and dest: 133 dest = dest[0] 134 if dest in self.cache: 135 return self.cache[dest] 136 if verbose is None: 137 verbose=conf.verb 138 # Transform "192.168.*.1-5" to one IP of the set 139 dst = dest.split("/")[0] 140 dst = dst.replace("*","0") 141 while True: 142 l = dst.find("-") 143 if l < 0: 144 break 145 m = (dst[l:]+".").find(".") 146 dst = dst[:l]+dst[l+m:] 147 148 149 dst = atol(dst) 150 pathes=[] 151 for d,m,gw,i,a,me in self.routes: 152 if not a: # some interfaces may not currently be connected 153 continue 154 aa = atol(a) 155 if aa == dst: 156 pathes.append((0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE,a,"0.0.0.0"))) 157 if (dst & m) == (d & m): 158 pathes.append((m, me, (i,a,gw))) 159 if not pathes: 160 if verbose: 161 warning("No route found (no default route?)") 162 return scapy.consts.LOOPBACK_INTERFACE,"0.0.0.0","0.0.0.0" 163 # Choose the more specific route 164 # Sort by greatest netmask 165 pathes.sort(key=lambda x: x[0], reverse=True) 166 # Get all pathes having the (same) greatest mask 167 pathes = [i for i in pathes if i[0] == pathes[0][0]] 168 # Tie-breaker: Metrics 169 pathes.sort(key=lambda x: x[1]) 170 # Return interface 171 ret = pathes[0][2] 172 self.cache[dest] = ret 173 return ret 174 175 def get_if_bcast(self, iff): 176 for net, msk, gw, iface, addr, metric in self.routes: 177 if net == 0: 178 continue 179 if WINDOWS: 180 if iff.guid != iface.guid: 181 continue 182 elif iff != iface: 183 continue 184 bcast = atol(addr)|(~msk&0xffffffff); # FIXME: check error in atol() 185 return ltoa(bcast) 186 warning("No broadcast address found for iface %s\n", iff); 187 188 conf.route=Route() 
189 190 #XXX use "with" 191 _betteriface = conf.route.route("0.0.0.0", verbose=0)[0] 192 if ((_betteriface if (isinstance(_betteriface, six.string_types) or _betteriface is None) else _betteriface.name) != scapy.consts.LOOPBACK_NAME): 193 conf.iface = _betteriface 194 else: 195 conf.iface = get_working_if() 196 del(_betteriface) 197 [end of scapy/route.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scapy/route.py b/scapy/route.py --- a/scapy/route.py +++ b/scapy/route.py @@ -11,7 +11,7 @@ from scapy.utils import atol, ltoa, itom, pretty_routes from scapy.config import conf from scapy.error import Scapy_Exception, warning -from scapy.arch import WINDOWS +from scapy.arch import WINDOWS, get_working_if import scapy.consts import scapy.modules.six as six
{"golden_diff": "diff --git a/scapy/route.py b/scapy/route.py\n--- a/scapy/route.py\n+++ b/scapy/route.py\n@@ -11,7 +11,7 @@\n from scapy.utils import atol, ltoa, itom, pretty_routes\n from scapy.config import conf\n from scapy.error import Scapy_Exception, warning\n-from scapy.arch import WINDOWS\n+from scapy.arch import WINDOWS, get_working_if\n import scapy.consts\n import scapy.modules.six as six\n", "issue": "scapy import fail: NameError: name 'get_working_if' is not defined\nHello,\r\n\r\nOn an Ubuntu 16.04.3 LTS, when launching `scapy` CLI or importing `scapy.route` without any interface in use, I got the following error:\r\n```Python\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/scapy\", line 25, in <module>\r\n interact()\r\n File \"tools/scapy/scapy/main.py\", line 421, in interact\r\n init_session(session_name, mydict)\r\n File \"tools/scapy/scapy/main.py\", line 293, in init_session\r\n scapy_builtins = {k: v for k, v in six.iteritems(importlib.import_module(\".all\", \"scapy\").__dict__) if _validate_local(k)}\r\n File \"/usr/lib/python2.7/importlib/__init__.py\", line 37, in import_module\r\n __import__(name)\r\n File \"tools/scapy/scapy/all.py\", line 25, in <module>\r\n from scapy.route import *\r\n File \"tools/scapy/scapy/route.py\", line 195, in <module>\r\n conf.iface = get_working_if()\r\nNameError: name 'get_working_if' is not defined\r\n```\r\n\r\nA bisect leads to the recent commit fd50b349263256e0aaa69780937ae02d4f4ee46c, more likely to [this code](https://github.com/secdev/scapy/commit/fd50b349263256e0aaa69780937ae02d4f4ee46c#diff-3fc486d3e1085d11c80c20bab07375f2R194)\r\n\r\nI'm not sure to correctly understand for this code snippet and how it works on the different OS. This is why I prefer opening an issue :smiley: \n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nRouting and handling of network interfaces.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.utils import atol, ltoa, itom, pretty_routes\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.arch import WINDOWS\nimport scapy.consts\nimport scapy.modules.six as six\n\n##############################\n## Routing/Interfaces stuff ##\n##############################\n\nclass Route:\n def __init__(self):\n self.resync()\n self.cache = {}\n\n def invalidate_cache(self):\n self.cache = {}\n\n def resync(self):\n from scapy.arch import read_routes\n self.invalidate_cache()\n self.routes = read_routes()\n\n def __repr__(self):\n rtlst = []\n for net, msk, gw, iface, addr, metric in self.routes:\n rtlst.append((ltoa(net),\n ltoa(msk),\n gw,\n (iface.name if not isinstance(iface, six.string_types) else iface),\n addr,\n str(metric)))\n\n return pretty_routes(rtlst,\n [(\"Network\", \"Netmask\", \"Gateway\", \"Iface\", \"Output IP\", \"Metric\")])\n\n def make_route(self, host=None, net=None, gw=None, dev=None, metric=1):\n from scapy.arch import get_if_addr\n if host is not None:\n thenet,msk = host,32\n elif net is not None:\n thenet,msk = net.split(\"/\")\n msk = int(msk)\n else:\n raise Scapy_Exception(\"make_route: Incorrect parameters. 
You should specify a host or a net\")\n if gw is None:\n gw=\"0.0.0.0\"\n if dev is None:\n if gw:\n nhop = gw\n else:\n nhop = thenet\n dev,ifaddr,x = self.route(nhop)\n else:\n ifaddr = get_if_addr(dev)\n return (atol(thenet), itom(msk), gw, dev, ifaddr, metric)\n\n def add(self, *args, **kargs):\n \"\"\"Ex:\n add(net=\"192.168.1.0/24\",gw=\"1.2.3.4\")\n \"\"\"\n self.invalidate_cache()\n self.routes.append(self.make_route(*args,**kargs))\n\n \n def delt(self, *args, **kargs):\n \"\"\"delt(host|net, gw|dev)\"\"\"\n self.invalidate_cache()\n route = self.make_route(*args,**kargs)\n try:\n i=self.routes.index(route)\n del(self.routes[i])\n except ValueError:\n warning(\"no matching route found\")\n \n def ifchange(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n \n \n for i, route in enumerate(self.routes):\n net, msk, gw, iface, addr, metric = route\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n if gw == '0.0.0.0':\n self.routes[i] = (the_net,the_msk,gw,iface,the_addr,metric)\n else:\n self.routes[i] = (net,msk,gw,iface,the_addr,metric)\n conf.netcache.flush()\n \n \n\n def ifdel(self, iff):\n self.invalidate_cache()\n new_routes=[]\n for rt in self.routes:\n if WINDOWS:\n if iff.guid == rt[3].guid:\n continue\n elif iff == rt[3]:\n continue\n new_routes.append(rt)\n self.routes=new_routes\n \n def ifadd(self, iff, addr):\n self.invalidate_cache()\n the_addr,the_msk = (addr.split(\"/\")+[\"32\"])[:2]\n the_msk = itom(int(the_msk))\n the_rawaddr = atol(the_addr)\n the_net = the_rawaddr & the_msk\n self.routes.append((the_net,the_msk,'0.0.0.0',iff,the_addr,1))\n\n\n def route(self,dest,verbose=None):\n if isinstance(dest, list) and dest:\n dest = dest[0]\n if dest in self.cache:\n return self.cache[dest]\n if verbose is None:\n verbose=conf.verb\n # Transform \"192.168.*.1-5\" to one IP of the set\n dst = dest.split(\"/\")[0]\n dst = dst.replace(\"*\",\"0\") \n while True:\n l = dst.find(\"-\")\n if l < 0:\n break\n m = (dst[l:]+\".\").find(\".\")\n dst = dst[:l]+dst[l+m:]\n\n \n dst = atol(dst)\n pathes=[]\n for d,m,gw,i,a,me in self.routes:\n if not a: # some interfaces may not currently be connected\n continue\n aa = atol(a)\n if aa == dst:\n pathes.append((0xffffffff, 1, (scapy.consts.LOOPBACK_INTERFACE,a,\"0.0.0.0\")))\n if (dst & m) == (d & m):\n pathes.append((m, me, (i,a,gw)))\n if not pathes:\n if verbose:\n warning(\"No route found (no default route?)\")\n return scapy.consts.LOOPBACK_INTERFACE,\"0.0.0.0\",\"0.0.0.0\"\n # Choose the more specific route\n # Sort by greatest netmask\n pathes.sort(key=lambda x: x[0], reverse=True)\n # Get all pathes having the (same) greatest mask\n pathes = [i for i in pathes if i[0] == pathes[0][0]]\n # Tie-breaker: Metrics\n pathes.sort(key=lambda x: x[1])\n # Return interface\n ret = pathes[0][2]\n self.cache[dest] = ret\n return ret\n \n def get_if_bcast(self, iff):\n for net, msk, gw, iface, addr, metric in self.routes:\n if net == 0:\n continue\n if WINDOWS:\n if iff.guid != iface.guid:\n continue\n elif iff != iface:\n continue\n bcast = atol(addr)|(~msk&0xffffffff); # FIXME: check error in atol()\n return ltoa(bcast)\n warning(\"No broadcast address found for iface %s\\n\", iff);\n\nconf.route=Route()\n\n#XXX use \"with\"\n_betteriface = conf.route.route(\"0.0.0.0\", verbose=0)[0]\nif ((_betteriface if (isinstance(_betteriface, six.string_types) or 
_betteriface is None) else _betteriface.name) != scapy.consts.LOOPBACK_NAME):\n conf.iface = _betteriface\nelse:\n conf.iface = get_working_if()\ndel(_betteriface)\n", "path": "scapy/route.py"}]}
num_tokens_prompt: 3,078
num_tokens_diff: 112
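The scapy fix above is a one-line import, so the most useful check is simply that importing `scapy.route` no longer raises `NameError` on a machine whose best route resolves to loopback (the situation described in the issue). A small smoke-test sketch, assuming the patched tree is importable:

```python
import importlib

# scapy.route assigns conf.iface at import time, falling back to
# get_working_if() when the default route points at the loopback
# interface, so importing the module is enough to exercise the fix.
route = importlib.import_module("scapy.route")
print("interface chosen by scapy:", route.conf.iface)
```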
problem_id: gh_patches_debug_1139
source: rasdani/github-patches
task_type: git_diff
in_source_id: mitmproxy__mitmproxy-1150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ServerException instead of ProxyServerError ##### Steps to reproduce the problem: ``` >>> from libmproxy.proxy.server import ProxyServer >>> from libmproxy.proxy.config import ProxyConfig >>> ProxyServer(ProxyConfig(port=80)) (...) ServerException: Error starting proxy server: error(13, 'Permission denied') ``` ##### What is the expected behavior? According to the documentation: ``` >>> ProxyServer? Type: type String form: <class 'libmproxy.proxy.server.ProxyServer'> File: /usr/lib/python2.7/dist-packages/libmproxy/proxy/server.py Init definition: ProxyServer(self, config) Docstring: <no docstring> Init docstring: Raises ProxyServerError if there's a startup problem. ``` the expected behavior is ``` >>> ProxyServer(ProxyConfig(port=80)) (...) ProxyServerError: Error starting proxy server: error(13, 'Permission denied') ``` ##### What went wrong? Maybe the documentation is wrong? ##### Any other comments? Nope. --- Mitmproxy Version: 0.15-2 Operating System: Debian Sid. </issue> <code> [start of mitmproxy/proxy/server.py] 1 from __future__ import (absolute_import, print_function, division) 2 3 import traceback 4 import sys 5 import socket 6 import six 7 8 from netlib import tcp 9 from netlib.exceptions import TcpException 10 from netlib.http.http1 import assemble_response 11 from ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill 12 from ..models import ClientConnection, make_error_response 13 from .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy 14 from .root_context import RootContext, Log 15 16 17 class DummyServer: 18 bound = False 19 20 def __init__(self, config): 21 self.config = config 22 23 def set_channel(self, channel): 24 pass 25 26 def serve_forever(self): 27 pass 28 29 def shutdown(self): 30 pass 31 32 33 class ProxyServer(tcp.TCPServer): 34 allow_reuse_address = True 35 bound = True 36 37 def __init__(self, config): 38 """ 39 Raises ProxyServerError if there's a startup problem. 
40 """ 41 self.config = config 42 try: 43 super(ProxyServer, self).__init__((config.host, config.port)) 44 except socket.error as e: 45 six.reraise( 46 ServerException, 47 ServerException('Error starting proxy server: ' + repr(e)), 48 sys.exc_info()[2] 49 ) 50 self.channel = None 51 52 def set_channel(self, channel): 53 self.channel = channel 54 55 def handle_client_connection(self, conn, client_address): 56 h = ConnectionHandler( 57 conn, 58 client_address, 59 self.config, 60 self.channel 61 ) 62 h.handle() 63 64 65 class ConnectionHandler(object): 66 67 def __init__(self, client_conn, client_address, config, channel): 68 self.config = config 69 """@type: mitmproxy.proxy.config.ProxyConfig""" 70 self.client_conn = ClientConnection( 71 client_conn, 72 client_address, 73 None) 74 """@type: mitmproxy.proxy.connection.ClientConnection""" 75 self.channel = channel 76 """@type: mitmproxy.controller.Channel""" 77 78 def _create_root_layer(self): 79 root_context = RootContext( 80 self.client_conn, 81 self.config, 82 self.channel 83 ) 84 85 mode = self.config.mode 86 if mode == "upstream": 87 return HttpUpstreamProxy( 88 root_context, 89 self.config.upstream_server.address 90 ) 91 elif mode == "transparent": 92 return TransparentProxy(root_context) 93 elif mode == "reverse": 94 server_tls = self.config.upstream_server.scheme == "https" 95 return ReverseProxy( 96 root_context, 97 self.config.upstream_server.address, 98 server_tls 99 ) 100 elif mode == "socks5": 101 return Socks5Proxy(root_context) 102 elif mode == "regular": 103 return HttpProxy(root_context) 104 elif callable(mode): # pragma: no cover 105 return mode(root_context) 106 else: # pragma: no cover 107 raise ValueError("Unknown proxy mode: %s" % mode) 108 109 def handle(self): 110 self.log("clientconnect", "info") 111 112 root_layer = self._create_root_layer() 113 114 try: 115 root_layer = self.channel.ask("clientconnect", root_layer) 116 root_layer() 117 except Kill: 118 self.log("Connection killed", "info") 119 except ProtocolException as e: 120 121 if isinstance(e, ClientHandshakeException): 122 self.log( 123 "Client Handshake failed. " 124 "The client may not trust the proxy's certificate for {}.".format(e.server), 125 "error" 126 ) 127 self.log(repr(e), "debug") 128 else: 129 self.log(repr(e), "info") 130 131 self.log(traceback.format_exc(), "debug") 132 # If an error propagates to the topmost level, 133 # we send an HTTP error response, which is both 134 # understandable by HTTP clients and humans. 135 try: 136 error_response = make_error_response(502, repr(e)) 137 self.client_conn.send(assemble_response(error_response)) 138 except TcpException: 139 pass 140 except Exception: 141 self.log(traceback.format_exc(), "error") 142 print(traceback.format_exc(), file=sys.stderr) 143 print("mitmproxy has crashed!", file=sys.stderr) 144 print("Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy", file=sys.stderr) 145 146 self.log("clientdisconnect", "info") 147 self.channel.tell("clientdisconnect", root_layer) 148 self.client_conn.finish() 149 150 def log(self, msg, level): 151 msg = "{}: {}".format(repr(self.client_conn.address), msg) 152 self.channel.tell("log", Log(msg, level)) 153 [end of mitmproxy/proxy/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/proxy/server.py b/mitmproxy/proxy/server.py --- a/mitmproxy/proxy/server.py +++ b/mitmproxy/proxy/server.py @@ -36,7 +36,7 @@ def __init__(self, config): """ - Raises ProxyServerError if there's a startup problem. + Raises ServerException if there's a startup problem. """ self.config = config try:
{"golden_diff": "diff --git a/mitmproxy/proxy/server.py b/mitmproxy/proxy/server.py\n--- a/mitmproxy/proxy/server.py\n+++ b/mitmproxy/proxy/server.py\n@@ -36,7 +36,7 @@\n \n def __init__(self, config):\n \"\"\"\n- Raises ProxyServerError if there's a startup problem.\n+ Raises ServerException if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n", "issue": "ServerException instead of ProxyServerError\n##### Steps to reproduce the problem:\n\n```\n>>> from libmproxy.proxy.server import ProxyServer\n>>> from libmproxy.proxy.config import ProxyConfig\n>>> ProxyServer(ProxyConfig(port=80))\n(...)\nServerException: Error starting proxy server: error(13, 'Permission denied')\n```\n##### What is the expected behavior?\n\nAccording to the documentation:\n\n```\n>>> ProxyServer? \nType: type\nString form: <class 'libmproxy.proxy.server.ProxyServer'>\nFile: /usr/lib/python2.7/dist-packages/libmproxy/proxy/server.py\nInit definition: ProxyServer(self, config)\nDocstring: <no docstring>\nInit docstring: Raises ProxyServerError if there's a startup problem.\n```\n\nthe expected behavior is \n\n```\n>>> ProxyServer(ProxyConfig(port=80))\n(...)\nProxyServerError: Error starting proxy server: error(13, 'Permission denied')\n```\n##### What went wrong?\n\nMaybe the documentation is wrong?\n##### Any other comments?\n\nNope.\n\n---\n\nMitmproxy Version: 0.15-2\nOperating System: Debian Sid.\n\n", "before_files": [{"content": "from __future__ import (absolute_import, print_function, division)\n\nimport traceback\nimport sys\nimport socket\nimport six\n\nfrom netlib import tcp\nfrom netlib.exceptions import TcpException\nfrom netlib.http.http1 import assemble_response\nfrom ..exceptions import ProtocolException, ServerException, ClientHandshakeException, Kill\nfrom ..models import ClientConnection, make_error_response\nfrom .modes import HttpUpstreamProxy, HttpProxy, ReverseProxy, TransparentProxy, Socks5Proxy\nfrom .root_context import RootContext, Log\n\n\nclass DummyServer:\n bound = False\n\n def __init__(self, config):\n self.config = config\n\n def set_channel(self, channel):\n pass\n\n def serve_forever(self):\n pass\n\n def shutdown(self):\n pass\n\n\nclass ProxyServer(tcp.TCPServer):\n allow_reuse_address = True\n bound = True\n\n def __init__(self, config):\n \"\"\"\n Raises ProxyServerError if there's a startup problem.\n \"\"\"\n self.config = config\n try:\n super(ProxyServer, self).__init__((config.host, config.port))\n except socket.error as e:\n six.reraise(\n ServerException,\n ServerException('Error starting proxy server: ' + repr(e)),\n sys.exc_info()[2]\n )\n self.channel = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def handle_client_connection(self, conn, client_address):\n h = ConnectionHandler(\n conn,\n client_address,\n self.config,\n self.channel\n )\n h.handle()\n\n\nclass ConnectionHandler(object):\n\n def __init__(self, client_conn, client_address, config, channel):\n self.config = config\n \"\"\"@type: mitmproxy.proxy.config.ProxyConfig\"\"\"\n self.client_conn = ClientConnection(\n client_conn,\n client_address,\n None)\n \"\"\"@type: mitmproxy.proxy.connection.ClientConnection\"\"\"\n self.channel = channel\n \"\"\"@type: mitmproxy.controller.Channel\"\"\"\n\n def _create_root_layer(self):\n root_context = RootContext(\n self.client_conn,\n self.config,\n self.channel\n )\n\n mode = self.config.mode\n if mode == \"upstream\":\n return HttpUpstreamProxy(\n root_context,\n self.config.upstream_server.address\n )\n elif mode == 
\"transparent\":\n return TransparentProxy(root_context)\n elif mode == \"reverse\":\n server_tls = self.config.upstream_server.scheme == \"https\"\n return ReverseProxy(\n root_context,\n self.config.upstream_server.address,\n server_tls\n )\n elif mode == \"socks5\":\n return Socks5Proxy(root_context)\n elif mode == \"regular\":\n return HttpProxy(root_context)\n elif callable(mode): # pragma: no cover\n return mode(root_context)\n else: # pragma: no cover\n raise ValueError(\"Unknown proxy mode: %s\" % mode)\n\n def handle(self):\n self.log(\"clientconnect\", \"info\")\n\n root_layer = self._create_root_layer()\n\n try:\n root_layer = self.channel.ask(\"clientconnect\", root_layer)\n root_layer()\n except Kill:\n self.log(\"Connection killed\", \"info\")\n except ProtocolException as e:\n\n if isinstance(e, ClientHandshakeException):\n self.log(\n \"Client Handshake failed. \"\n \"The client may not trust the proxy's certificate for {}.\".format(e.server),\n \"error\"\n )\n self.log(repr(e), \"debug\")\n else:\n self.log(repr(e), \"info\")\n\n self.log(traceback.format_exc(), \"debug\")\n # If an error propagates to the topmost level,\n # we send an HTTP error response, which is both\n # understandable by HTTP clients and humans.\n try:\n error_response = make_error_response(502, repr(e))\n self.client_conn.send(assemble_response(error_response))\n except TcpException:\n pass\n except Exception:\n self.log(traceback.format_exc(), \"error\")\n print(traceback.format_exc(), file=sys.stderr)\n print(\"mitmproxy has crashed!\", file=sys.stderr)\n print(\"Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy\", file=sys.stderr)\n\n self.log(\"clientdisconnect\", \"info\")\n self.channel.tell(\"clientdisconnect\", root_layer)\n self.client_conn.finish()\n\n def log(self, msg, level):\n msg = \"{}: {}\".format(repr(self.client_conn.address), msg)\n self.channel.tell(\"log\", Log(msg, level))\n", "path": "mitmproxy/proxy/server.py"}]}
num_tokens_prompt: 2,124
num_tokens_diff: 99
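The mitmproxy change is documentation only: the docstring now names the exception that `ProxyServer.__init__` actually raises. A hedged usage sketch mirroring the snippet in the issue follows; the import paths are taken from the ones used inside `proxy/server.py`, and binding port 80 as an unprivileged user is just a convenient way to trigger the failure.

```python
from mitmproxy.exceptions import ServerException
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer

try:
    server = ProxyServer(ProxyConfig(port=80))  # EACCES for non-root users
except ServerException as exc:
    # This is the exception the corrected docstring advertises;
    # there is no ProxyServerError class to catch.
    print("proxy could not start:", exc)
```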
problem_id: gh_patches_debug_22479
source: rasdani/github-patches
task_type: git_diff
in_source_id: common-workflow-language__cwltool-632
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> boolean without prefix should not add anything to the command line thanks to @milos-ljubinkovic for finding this! </issue> <code> [start of cwltool/builder.py] 1 from __future__ import absolute_import 2 import copy 3 import os 4 import logging 5 from typing import Any, Callable, Dict, List, Text, Type, Union 6 7 import six 8 from six import iteritems, string_types 9 10 import avro 11 import schema_salad.validate as validate 12 from schema_salad.sourceline import SourceLine 13 14 from . import expression 15 from .errors import WorkflowException 16 from .mutation import MutationManager 17 from .pathmapper import (PathMapper, get_listing, normalizeFilesDirs, 18 visit_class) 19 from .stdfsaccess import StdFsAccess 20 from .utils import aslist, get_feature, docker_windows_path_adjust, onWindows 21 22 _logger = logging.getLogger("cwltool") 23 24 AvroSchemaFromJSONData = avro.schema.make_avsc_object 25 26 CONTENT_LIMIT = 64 * 1024 27 28 29 def substitute(value, replace): # type: (Text, Text) -> Text 30 if replace[0] == "^": 31 return substitute(value[0:value.rindex('.')], replace[1:]) 32 else: 33 return value + replace 34 35 36 class Builder(object): 37 def __init__(self): # type: () -> None 38 self.names = None # type: avro.schema.Names 39 self.schemaDefs = None # type: Dict[Text, Dict[Text, Any]] 40 self.files = None # type: List[Dict[Text, Text]] 41 self.fs_access = None # type: StdFsAccess 42 self.job = None # type: Dict[Text, Union[Dict[Text, Any], List, Text]] 43 self.requirements = None # type: List[Dict[Text, Any]] 44 self.hints = None # type: List[Dict[Text, Any]] 45 self.outdir = None # type: Text 46 self.tmpdir = None # type: Text 47 self.resources = None # type: Dict[Text, Union[int, Text]] 48 self.bindings = [] # type: List[Dict[Text, Any]] 49 self.timeout = None # type: int 50 self.pathmapper = None # type: PathMapper 51 self.stagedir = None # type: Text 52 self.make_fs_access = None # type: Type[StdFsAccess] 53 self.debug = False # type: bool 54 self.js_console = False # type: bool 55 self.mutation_manager = None # type: MutationManager 56 self.force_docker_pull = False # type: bool 57 58 # One of "no_listing", "shallow_listing", "deep_listing" 59 # Will be default "no_listing" for CWL v1.1 60 self.loadListing = "deep_listing" # type: Union[None, str] 61 62 self.find_default_container = None # type: Callable[[], Text] 63 self.job_script_provider = None # type: Any 64 65 def build_job_script(self, commands): 66 # type: (List[Text]) -> Text 67 build_job_script_method = getattr(self.job_script_provider, "build_job_script", None) # type: Callable[[Builder, Union[List[str],List[Text]]], Text] 68 if build_job_script_method: 69 return build_job_script_method(self, commands) 70 else: 71 return None 72 73 def bind_input(self, schema, datum, lead_pos=None, tail_pos=None): 74 # type: (Dict[Text, Any], Any, Union[int, List[int]], List[int]) -> List[Dict[Text, Any]] 75 if tail_pos is None: 76 tail_pos = [] 77 if lead_pos is None: 78 lead_pos = [] 79 bindings = [] # type: List[Dict[Text,Text]] 80 binding = None # type: Dict[Text,Any] 81 if "inputBinding" in schema and isinstance(schema["inputBinding"], dict): 82 binding = copy.copy(schema["inputBinding"]) 83 84 if "position" in binding: 85 binding["position"] = aslist(lead_pos) + aslist(binding["position"]) + aslist(tail_pos) 86 else: 87 binding["position"] = aslist(lead_pos) + [0] + aslist(tail_pos) 88 89 binding["datum"] = datum 90 91 
# Handle union types 92 if isinstance(schema["type"], list): 93 for t in schema["type"]: 94 if isinstance(t, (str, Text)) and self.names.has_name(t, ""): 95 avsc = self.names.get_name(t, "") 96 elif isinstance(t, dict) and "name" in t and self.names.has_name(t["name"], ""): 97 avsc = self.names.get_name(t["name"], "") 98 else: 99 avsc = AvroSchemaFromJSONData(t, self.names) 100 if validate.validate(avsc, datum): 101 schema = copy.deepcopy(schema) 102 schema["type"] = t 103 return self.bind_input(schema, datum, lead_pos=lead_pos, tail_pos=tail_pos) 104 raise validate.ValidationException(u"'%s' is not a valid union %s" % (datum, schema["type"])) 105 elif isinstance(schema["type"], dict): 106 st = copy.deepcopy(schema["type"]) 107 if binding and "inputBinding" not in st and st["type"] == "array" and "itemSeparator" not in binding: 108 st["inputBinding"] = {} 109 for k in ("secondaryFiles", "format", "streamable"): 110 if k in schema: 111 st[k] = schema[k] 112 bindings.extend(self.bind_input(st, datum, lead_pos=lead_pos, tail_pos=tail_pos)) 113 else: 114 if schema["type"] in self.schemaDefs: 115 schema = self.schemaDefs[schema["type"]] 116 117 if schema["type"] == "record": 118 for f in schema["fields"]: 119 if f["name"] in datum: 120 bindings.extend(self.bind_input(f, datum[f["name"]], lead_pos=lead_pos, tail_pos=f["name"])) 121 else: 122 datum[f["name"]] = f.get("default") 123 124 if schema["type"] == "array": 125 for n, item in enumerate(datum): 126 b2 = None 127 if binding: 128 b2 = copy.deepcopy(binding) 129 b2["datum"] = item 130 itemschema = { 131 u"type": schema["items"], 132 u"inputBinding": b2 133 } 134 for k in ("secondaryFiles", "format", "streamable"): 135 if k in schema: 136 itemschema[k] = schema[k] 137 bindings.extend( 138 self.bind_input(itemschema, item, lead_pos=n, tail_pos=tail_pos)) 139 binding = None 140 141 if schema["type"] == "File": 142 self.files.append(datum) 143 if binding: 144 if binding.get("loadContents"): 145 with self.fs_access.open(datum["location"], "rb") as f: 146 datum["contents"] = f.read(CONTENT_LIMIT) 147 148 if "secondaryFiles" in schema: 149 if "secondaryFiles" not in datum: 150 datum["secondaryFiles"] = [] 151 for sf in aslist(schema["secondaryFiles"]): 152 if isinstance(sf, dict) or "$(" in sf or "${" in sf: 153 sfpath = self.do_eval(sf, context=datum) 154 else: 155 sfpath = substitute(datum["basename"], sf) 156 for sfname in aslist(sfpath): 157 found = False 158 for d in datum["secondaryFiles"]: 159 if not d.get("basename"): 160 d["basename"] = d["location"][d["location"].rindex("/")+1:] 161 if d["basename"] == sfname: 162 found = True 163 if not found: 164 if isinstance(sfname, dict): 165 datum["secondaryFiles"].append(sfname) 166 else: 167 datum["secondaryFiles"].append({ 168 "location": datum["location"][0:datum["location"].rindex("/")+1]+sfname, 169 "basename": sfname, 170 "class": "File"}) 171 172 normalizeFilesDirs(datum["secondaryFiles"]) 173 174 def _capture_files(f): 175 self.files.append(f) 176 return f 177 178 visit_class(datum.get("secondaryFiles", []), ("File", "Directory"), _capture_files) 179 180 if schema["type"] == "Directory": 181 ll = self.loadListing or (binding and binding.get("loadListing")) 182 if ll and ll != "no_listing": 183 get_listing(self.fs_access, datum, (ll == "deep_listing")) 184 self.files.append(datum) 185 186 # Position to front of the sort key 187 if binding: 188 for bi in bindings: 189 bi["position"] = binding["position"] + bi["position"] 190 bindings.append(binding) 191 192 return bindings 193 194 def 
tostr(self, value): # type: (Any) -> Text 195 if isinstance(value, dict) and value.get("class") in ("File", "Directory"): 196 if "path" not in value: 197 raise WorkflowException(u"%s object missing \"path\": %s" % (value["class"], value)) 198 199 # Path adjust for windows file path when passing to docker, docker accepts unix like path only 200 (docker_req, docker_is_req) = get_feature(self, "DockerRequirement") 201 if onWindows() and docker_req is not None: # docker_req is none only when there is no dockerRequirement mentioned in hints and Requirement 202 return docker_windows_path_adjust(value["path"]) 203 return value["path"] 204 else: 205 return Text(value) 206 207 def generate_arg(self, binding): # type: (Dict[Text,Any]) -> List[Text] 208 value = binding.get("datum") 209 if "valueFrom" in binding: 210 with SourceLine(binding, "valueFrom", WorkflowException, _logger.isEnabledFor(logging.DEBUG)): 211 value = self.do_eval(binding["valueFrom"], context=value) 212 213 prefix = binding.get("prefix") 214 sep = binding.get("separate", True) 215 216 l = [] # type: List[Dict[Text,Text]] 217 if isinstance(value, list): 218 if binding.get("itemSeparator"): 219 l = [binding["itemSeparator"].join([self.tostr(v) for v in value])] 220 elif binding.get("valueFrom"): 221 value = [self.tostr(v) for v in value] 222 return ([prefix] if prefix else []) + value 223 elif prefix: 224 return [prefix] 225 else: 226 return [] 227 elif isinstance(value, dict) and value.get("class") in ("File", "Directory"): 228 l = [value] 229 elif isinstance(value, dict): 230 return [prefix] if prefix else [] 231 elif value is True and prefix: 232 return [prefix] 233 elif value is False or value is None: 234 return [] 235 else: 236 l = [value] 237 238 args = [] 239 for j in l: 240 if sep: 241 args.extend([prefix, self.tostr(j)]) 242 else: 243 args.append(prefix + self.tostr(j)) 244 245 return [a for a in args if a is not None] 246 247 def do_eval(self, ex, context=None, pull_image=True, recursive=False): 248 # type: (Union[Dict[Text, Text], Text], Any, bool, bool) -> Any 249 if recursive: 250 if isinstance(ex, dict): 251 return {k: self.do_eval(v, context, pull_image, recursive) for k, v in iteritems(ex)} 252 if isinstance(ex, list): 253 return [self.do_eval(v, context, pull_image, recursive) for v in ex] 254 255 return expression.do_eval(ex, self.job, self.requirements, 256 self.outdir, self.tmpdir, 257 self.resources, 258 context=context, pull_image=pull_image, 259 timeout=self.timeout, 260 debug=self.debug, 261 js_console=self.js_console, 262 force_docker_pull=self.force_docker_pull) 263 [end of cwltool/builder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cwltool/builder.py b/cwltool/builder.py --- a/cwltool/builder.py +++ b/cwltool/builder.py @@ -230,7 +230,7 @@ return [prefix] if prefix else [] elif value is True and prefix: return [prefix] - elif value is False or value is None: + elif value is False or value is None or (value is True and not prefix): return [] else: l = [value] @@ -251,7 +251,8 @@ return {k: self.do_eval(v, context, pull_image, recursive) for k, v in iteritems(ex)} if isinstance(ex, list): return [self.do_eval(v, context, pull_image, recursive) for v in ex] - + if context is None and type(ex) is str and "self" in ex: + return None return expression.do_eval(ex, self.job, self.requirements, self.outdir, self.tmpdir, self.resources,
{"golden_diff": "diff --git a/cwltool/builder.py b/cwltool/builder.py\n--- a/cwltool/builder.py\n+++ b/cwltool/builder.py\n@@ -230,7 +230,7 @@\n return [prefix] if prefix else []\n elif value is True and prefix:\n return [prefix]\n- elif value is False or value is None:\n+ elif value is False or value is None or (value is True and not prefix):\n return []\n else:\n l = [value]\n@@ -251,7 +251,8 @@\n return {k: self.do_eval(v, context, pull_image, recursive) for k, v in iteritems(ex)}\n if isinstance(ex, list):\n return [self.do_eval(v, context, pull_image, recursive) for v in ex]\n-\n+ if context is None and type(ex) is str and \"self\" in ex:\n+ return None\n return expression.do_eval(ex, self.job, self.requirements,\n self.outdir, self.tmpdir,\n self.resources,\n", "issue": "boolean without prefix should not add anything to the command line\nthanks to @milos-ljubinkovic for finding this!\n", "before_files": [{"content": "from __future__ import absolute_import\nimport copy\nimport os\nimport logging\nfrom typing import Any, Callable, Dict, List, Text, Type, Union\n\nimport six\nfrom six import iteritems, string_types\n\nimport avro\nimport schema_salad.validate as validate\nfrom schema_salad.sourceline import SourceLine\n\nfrom . import expression\nfrom .errors import WorkflowException\nfrom .mutation import MutationManager\nfrom .pathmapper import (PathMapper, get_listing, normalizeFilesDirs,\n visit_class)\nfrom .stdfsaccess import StdFsAccess\nfrom .utils import aslist, get_feature, docker_windows_path_adjust, onWindows\n\n_logger = logging.getLogger(\"cwltool\")\n\nAvroSchemaFromJSONData = avro.schema.make_avsc_object\n\nCONTENT_LIMIT = 64 * 1024\n\n\ndef substitute(value, replace): # type: (Text, Text) -> Text\n if replace[0] == \"^\":\n return substitute(value[0:value.rindex('.')], replace[1:])\n else:\n return value + replace\n\n\nclass Builder(object):\n def __init__(self): # type: () -> None\n self.names = None # type: avro.schema.Names\n self.schemaDefs = None # type: Dict[Text, Dict[Text, Any]]\n self.files = None # type: List[Dict[Text, Text]]\n self.fs_access = None # type: StdFsAccess\n self.job = None # type: Dict[Text, Union[Dict[Text, Any], List, Text]]\n self.requirements = None # type: List[Dict[Text, Any]]\n self.hints = None # type: List[Dict[Text, Any]]\n self.outdir = None # type: Text\n self.tmpdir = None # type: Text\n self.resources = None # type: Dict[Text, Union[int, Text]]\n self.bindings = [] # type: List[Dict[Text, Any]]\n self.timeout = None # type: int\n self.pathmapper = None # type: PathMapper\n self.stagedir = None # type: Text\n self.make_fs_access = None # type: Type[StdFsAccess]\n self.debug = False # type: bool\n self.js_console = False # type: bool\n self.mutation_manager = None # type: MutationManager\n self.force_docker_pull = False # type: bool\n\n # One of \"no_listing\", \"shallow_listing\", \"deep_listing\"\n # Will be default \"no_listing\" for CWL v1.1\n self.loadListing = \"deep_listing\" # type: Union[None, str]\n\n self.find_default_container = None # type: Callable[[], Text]\n self.job_script_provider = None # type: Any\n\n def build_job_script(self, commands):\n # type: (List[Text]) -> Text\n build_job_script_method = getattr(self.job_script_provider, \"build_job_script\", None) # type: Callable[[Builder, Union[List[str],List[Text]]], Text]\n if build_job_script_method:\n return build_job_script_method(self, commands)\n else:\n return None\n\n def bind_input(self, schema, datum, lead_pos=None, tail_pos=None):\n # type: (Dict[Text, 
Any], Any, Union[int, List[int]], List[int]) -> List[Dict[Text, Any]]\n if tail_pos is None:\n tail_pos = []\n if lead_pos is None:\n lead_pos = []\n bindings = [] # type: List[Dict[Text,Text]]\n binding = None # type: Dict[Text,Any]\n if \"inputBinding\" in schema and isinstance(schema[\"inputBinding\"], dict):\n binding = copy.copy(schema[\"inputBinding\"])\n\n if \"position\" in binding:\n binding[\"position\"] = aslist(lead_pos) + aslist(binding[\"position\"]) + aslist(tail_pos)\n else:\n binding[\"position\"] = aslist(lead_pos) + [0] + aslist(tail_pos)\n\n binding[\"datum\"] = datum\n\n # Handle union types\n if isinstance(schema[\"type\"], list):\n for t in schema[\"type\"]:\n if isinstance(t, (str, Text)) and self.names.has_name(t, \"\"):\n avsc = self.names.get_name(t, \"\")\n elif isinstance(t, dict) and \"name\" in t and self.names.has_name(t[\"name\"], \"\"):\n avsc = self.names.get_name(t[\"name\"], \"\")\n else:\n avsc = AvroSchemaFromJSONData(t, self.names)\n if validate.validate(avsc, datum):\n schema = copy.deepcopy(schema)\n schema[\"type\"] = t\n return self.bind_input(schema, datum, lead_pos=lead_pos, tail_pos=tail_pos)\n raise validate.ValidationException(u\"'%s' is not a valid union %s\" % (datum, schema[\"type\"]))\n elif isinstance(schema[\"type\"], dict):\n st = copy.deepcopy(schema[\"type\"])\n if binding and \"inputBinding\" not in st and st[\"type\"] == \"array\" and \"itemSeparator\" not in binding:\n st[\"inputBinding\"] = {}\n for k in (\"secondaryFiles\", \"format\", \"streamable\"):\n if k in schema:\n st[k] = schema[k]\n bindings.extend(self.bind_input(st, datum, lead_pos=lead_pos, tail_pos=tail_pos))\n else:\n if schema[\"type\"] in self.schemaDefs:\n schema = self.schemaDefs[schema[\"type\"]]\n\n if schema[\"type\"] == \"record\":\n for f in schema[\"fields\"]:\n if f[\"name\"] in datum:\n bindings.extend(self.bind_input(f, datum[f[\"name\"]], lead_pos=lead_pos, tail_pos=f[\"name\"]))\n else:\n datum[f[\"name\"]] = f.get(\"default\")\n\n if schema[\"type\"] == \"array\":\n for n, item in enumerate(datum):\n b2 = None\n if binding:\n b2 = copy.deepcopy(binding)\n b2[\"datum\"] = item\n itemschema = {\n u\"type\": schema[\"items\"],\n u\"inputBinding\": b2\n }\n for k in (\"secondaryFiles\", \"format\", \"streamable\"):\n if k in schema:\n itemschema[k] = schema[k]\n bindings.extend(\n self.bind_input(itemschema, item, lead_pos=n, tail_pos=tail_pos))\n binding = None\n\n if schema[\"type\"] == \"File\":\n self.files.append(datum)\n if binding:\n if binding.get(\"loadContents\"):\n with self.fs_access.open(datum[\"location\"], \"rb\") as f:\n datum[\"contents\"] = f.read(CONTENT_LIMIT)\n\n if \"secondaryFiles\" in schema:\n if \"secondaryFiles\" not in datum:\n datum[\"secondaryFiles\"] = []\n for sf in aslist(schema[\"secondaryFiles\"]):\n if isinstance(sf, dict) or \"$(\" in sf or \"${\" in sf:\n sfpath = self.do_eval(sf, context=datum)\n else:\n sfpath = substitute(datum[\"basename\"], sf)\n for sfname in aslist(sfpath):\n found = False\n for d in datum[\"secondaryFiles\"]:\n if not d.get(\"basename\"):\n d[\"basename\"] = d[\"location\"][d[\"location\"].rindex(\"/\")+1:]\n if d[\"basename\"] == sfname:\n found = True\n if not found:\n if isinstance(sfname, dict):\n datum[\"secondaryFiles\"].append(sfname)\n else:\n datum[\"secondaryFiles\"].append({\n \"location\": datum[\"location\"][0:datum[\"location\"].rindex(\"/\")+1]+sfname,\n \"basename\": sfname,\n \"class\": \"File\"})\n\n normalizeFilesDirs(datum[\"secondaryFiles\"])\n\n def 
_capture_files(f):\n self.files.append(f)\n return f\n\n visit_class(datum.get(\"secondaryFiles\", []), (\"File\", \"Directory\"), _capture_files)\n\n if schema[\"type\"] == \"Directory\":\n ll = self.loadListing or (binding and binding.get(\"loadListing\"))\n if ll and ll != \"no_listing\":\n get_listing(self.fs_access, datum, (ll == \"deep_listing\"))\n self.files.append(datum)\n\n # Position to front of the sort key\n if binding:\n for bi in bindings:\n bi[\"position\"] = binding[\"position\"] + bi[\"position\"]\n bindings.append(binding)\n\n return bindings\n\n def tostr(self, value): # type: (Any) -> Text\n if isinstance(value, dict) and value.get(\"class\") in (\"File\", \"Directory\"):\n if \"path\" not in value:\n raise WorkflowException(u\"%s object missing \\\"path\\\": %s\" % (value[\"class\"], value))\n\n # Path adjust for windows file path when passing to docker, docker accepts unix like path only\n (docker_req, docker_is_req) = get_feature(self, \"DockerRequirement\")\n if onWindows() and docker_req is not None: # docker_req is none only when there is no dockerRequirement mentioned in hints and Requirement\n return docker_windows_path_adjust(value[\"path\"])\n return value[\"path\"]\n else:\n return Text(value)\n\n def generate_arg(self, binding): # type: (Dict[Text,Any]) -> List[Text]\n value = binding.get(\"datum\")\n if \"valueFrom\" in binding:\n with SourceLine(binding, \"valueFrom\", WorkflowException, _logger.isEnabledFor(logging.DEBUG)):\n value = self.do_eval(binding[\"valueFrom\"], context=value)\n\n prefix = binding.get(\"prefix\")\n sep = binding.get(\"separate\", True)\n\n l = [] # type: List[Dict[Text,Text]]\n if isinstance(value, list):\n if binding.get(\"itemSeparator\"):\n l = [binding[\"itemSeparator\"].join([self.tostr(v) for v in value])]\n elif binding.get(\"valueFrom\"):\n value = [self.tostr(v) for v in value]\n return ([prefix] if prefix else []) + value\n elif prefix:\n return [prefix]\n else:\n return []\n elif isinstance(value, dict) and value.get(\"class\") in (\"File\", \"Directory\"):\n l = [value]\n elif isinstance(value, dict):\n return [prefix] if prefix else []\n elif value is True and prefix:\n return [prefix]\n elif value is False or value is None:\n return []\n else:\n l = [value]\n\n args = []\n for j in l:\n if sep:\n args.extend([prefix, self.tostr(j)])\n else:\n args.append(prefix + self.tostr(j))\n\n return [a for a in args if a is not None]\n\n def do_eval(self, ex, context=None, pull_image=True, recursive=False):\n # type: (Union[Dict[Text, Text], Text], Any, bool, bool) -> Any\n if recursive:\n if isinstance(ex, dict):\n return {k: self.do_eval(v, context, pull_image, recursive) for k, v in iteritems(ex)}\n if isinstance(ex, list):\n return [self.do_eval(v, context, pull_image, recursive) for v in ex]\n\n return expression.do_eval(ex, self.job, self.requirements,\n self.outdir, self.tmpdir,\n self.resources,\n context=context, pull_image=pull_image,\n timeout=self.timeout,\n debug=self.debug,\n js_console=self.js_console,\n force_docker_pull=self.force_docker_pull)\n", "path": "cwltool/builder.py"}]}
3,778
233
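To make the row above easier to follow at a glance, here is a minimal plain-Python sketch — standalone code, not cwltool's `Builder.generate_arg` — of the rule the golden diff enforces: a boolean input only ever contributes its `prefix`, so a `True` value with no prefix must add nothing to the command line. The function name and the assertions are invented for illustration.

```python
def generate_bool_arg(value, prefix=None):
    """Return the command-line tokens contributed by a boolean input binding."""
    if value is True and prefix:
        return [prefix]   # e.g. True with prefix "--verbose" -> ["--verbose"]
    # False, None, and True-without-a-prefix all contribute nothing.
    return []

assert generate_bool_arg(True, "--flag") == ["--flag"]
assert generate_bool_arg(True) == []        # the case reported in the issue
assert generate_bool_arg(False, "--flag") == []
assert generate_bool_arg(None) == []
```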
gh_patches_debug_13573
rasdani/github-patches
git_diff
vyperlang__vyper-891
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Disallow int128->int128 conversion. ### What's your issue about? Disallow`int128` to be converted to `int128`, follows https://github.com/ethereum/vyper/pull/882. ### How can it be fixed? Fill this in if you know how to fix it. #### Cute Animal Picture ![](https://peopledotcom.files.wordpress.com/2011/06/panda-660.jpg) </issue> <code> [start of vyper/types/convert.py] 1 from vyper.functions.signature import ( 2 signature 3 ) 4 from vyper.parser.parser_utils import ( 5 LLLnode, 6 getpos, 7 byte_array_to_num 8 ) 9 from vyper.exceptions import ( 10 InvalidLiteralException, 11 TypeMismatchException, 12 ) 13 from vyper.types import ( 14 BaseType, 15 ) 16 from vyper.types import ( 17 get_type, 18 ) 19 from vyper.utils import ( 20 DECIMAL_DIVISOR, 21 MemoryPositions, 22 SizeLimits 23 ) 24 25 26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal') 27 def to_int128(expr, args, kwargs, context): 28 in_node = args[0] 29 typ, len = get_type(in_node) 30 if typ in ('int128', 'uint256', 'bytes32'): 31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value): 32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr) 33 return LLLnode.from_list( 34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node, 35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr) 36 ) 37 else: 38 return byte_array_to_num(in_node, expr, 'int128') 39 40 41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal') 42 def to_uint256(expr, args, kwargs, context): 43 in_node = args[0] 44 typ, len = get_type(in_node) 45 if isinstance(in_node, int): 46 47 if not SizeLimits.in_bounds('uint256', in_node): 48 raise InvalidLiteralException("Number out of range: {}".format(in_node)) 49 _unit = in_node.typ.unit if typ == 'int128' else None 50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr)) 51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'): 52 _unit = in_node.typ.unit if typ == 'int128' else None 53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr)) 54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'): 55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr)) 56 else: 57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr) 58 59 60 @signature(('int128', 'uint256'), 'str_literal') 61 def to_decimal(expr, args, kwargs, context): 62 input = args[0] 63 if input.typ.typ == 'uint256': 64 return LLLnode.from_list( 65 ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]], 66 typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr) 67 ) 68 else: 69 return LLLnode.from_list( 70 ['mul', input, DECIMAL_DIVISOR], 71 typ=BaseType('decimal', input.typ.unit, input.typ.positional), 72 pos=getpos(expr) 73 ) 74 75 76 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal') 77 def to_bytes32(expr, args, kwargs, context): 78 input = args[0] 79 typ, len = get_type(input) 80 if typ == 'bytes': 81 if len != 32: 82 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len)) 83 if input.location == "memory": 84 return LLLnode.from_list( 85 ['mload', ['add', input, 32]], typ=BaseType('bytes32') 86 ) 87 elif input.location == "storage": 88 return LLLnode.from_list( 89 ['sload', 
['add', ['sha3_32', input], 1]], typ=BaseType('bytes32') 90 ) 91 else: 92 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr)) 93 94 95 def convert(expr, context): 96 output_type = expr.args[1].s 97 if output_type in conversion_table: 98 return conversion_table[output_type](expr, context) 99 else: 100 raise Exception("Conversion to {} is invalid.".format(output_type)) 101 102 103 conversion_table = { 104 'int128': to_int128, 105 'uint256': to_uint256, 106 'decimal': to_decimal, 107 'bytes32': to_bytes32, 108 } 109 [end of vyper/types/convert.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vyper/types/convert.py b/vyper/types/convert.py --- a/vyper/types/convert.py +++ b/vyper/types/convert.py @@ -23,11 +23,11 @@ ) -@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal') +@signature(('uint256', 'bytes32', 'bytes'), 'str_literal') def to_int128(expr, args, kwargs, context): in_node = args[0] typ, len = get_type(in_node) - if typ in ('int128', 'uint256', 'bytes32'): + if typ in ('uint256', 'bytes32'): if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value): raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr) return LLLnode.from_list(
{"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -23,11 +23,11 @@\n )\n \n \n-@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\n+@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')\n def to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n- if typ in ('int128', 'uint256', 'bytes32'):\n+ if typ in ('uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n", "issue": "Disallow int128->int128 conversion.\n### What's your issue about?\r\n\r\nDisallow`int128` to be converted to `int128`, follows https://github.com/ethereum/vyper/pull/882.\r\n\r\n### How can it be fixed?\r\n\r\nFill this in if you know how to fix it.\r\n\r\n#### Cute Animal Picture\r\n![](https://peopledotcom.files.wordpress.com/2011/06/panda-660.jpg)\r\n\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature(('int128', 'uint256'), 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n if input.typ.typ == 'uint256':\n return LLLnode.from_list(\n ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n )\n else:\n return LLLnode.from_list(\n ['mul', input, DECIMAL_DIVISOR],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n 
pos=getpos(expr)\n )\n\n\n@signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]}
1,925
220
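The change above is easier to see as a lookup-table rule: `int128` simply stops being listed as an acceptable source type for its own output type. The sketch below is plain Python with an invented `check_convert` helper, not Vyper's `@signature` decorator; only the accepted-type tuples are taken from the patched code.

```python
ALLOWED_INPUT_TYPES = {
    "int128": ("uint256", "bytes32", "bytes"),        # 'int128' removed by the patch
    "uint256": ("num_literal", "int128", "bytes32"),
}

def check_convert(in_typ, out_typ):
    if in_typ not in ALLOWED_INPUT_TYPES.get(out_typ, ()):
        raise TypeError("Conversion from %s to %s is invalid." % (in_typ, out_typ))

check_convert("uint256", "int128")       # still allowed
try:
    check_convert("int128", "int128")    # the no-op conversion is now rejected
except TypeError as exc:
    print(exc)
```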
gh_patches_debug_9417
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3804
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> boots.py spider doesn't correctly pick up all opticians The current test in boots.py to switch the brand tags for opticians is `properties["name"].startswith("Opticians - ")`: https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73 But this is not general enough to catch all of them. The displayed name of some opticians branches only start with "Opticians " or "Opticians-". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu I think you could safely change the test to `properties["name"].startswith("Opticians")` but the code a few lines below to strip out the "Opticians" prefix would need to be more complicated. </issue> <code> [start of locations/spiders/boots.py] 1 import scrapy 2 3 from locations.items import GeojsonPointItem 4 5 6 class BootsSpider(scrapy.Spider): 7 name = "boots" 8 item_attributes = {"brand": "Boots", "brand_wikidata": "Q6123139"} 9 allowed_domains = ["www.boots.com", "www.boots.ie"] 10 download_delay = 0.5 11 start_urls = ["http://www.boots.com/store-a-z", "http://www.boots.ie/store-a-z"] 12 13 def parse_hours(self, lis): 14 hours = [] 15 for li in lis: 16 day = li.xpath( 17 'normalize-space(./td[@class="store_hours_day"]/text())' 18 ).extract_first() 19 times = ( 20 li.xpath('normalize-space(./td[@class="store_hours_time"]/text())') 21 .extract_first() 22 .replace(" ", "") 23 .replace("Closed-Closed", "off") 24 ) 25 if times and day: 26 hours.append(day[:2] + " " + times) 27 28 return "; ".join(hours) 29 30 def parse_stores(self, response): 31 addr_full = response.xpath( 32 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][1]/dd[@class="store_info_list_item"]/text()' 33 ).extract() 34 address = ", ".join(map(str.strip, addr_full)) 35 # Handle blank store pages e.g. 
https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx 36 if len(address) == 0: 37 return 38 39 properties = { 40 "ref": response.xpath( 41 'normalize-space(//input[@id="bootsStoreId"]/@value)' 42 ).extract_first(), 43 "name": response.xpath( 44 'normalize-space(//input[@id="inputLocation"][@name="inputLocation"]/@value)' 45 ).extract_first(), 46 "postcode": response.xpath( 47 'normalize-space(//input[@id="storePostcode"]/@value)' 48 ).extract_first(), 49 "addr_full": address, 50 "phone": response.xpath( 51 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][3]/dd[@class="store_info_list_item"]/a/text()' 52 ).extract_first(), 53 "country": response.xpath( 54 'normalize-space(//input[@id="countryCode"][@name="countryCode"]/@value)' 55 ).extract_first(), 56 "website": response.url, 57 "lat": response.xpath( 58 'normalize-space(//input[@id="lat"]/@value)' 59 ).extract_first(), 60 "lon": response.xpath( 61 'normalize-space(//input[@id="lon"]/@value)' 62 ).extract_first(), 63 } 64 65 hours = self.parse_hours( 66 response.xpath( 67 '//div[@class="row store_all_opening_hours"]/div[1]/table[@class="store_opening_hours "]/tbody/tr' 68 ) 69 ) 70 if hours: 71 properties["opening_hours"] = hours 72 73 if properties["name"].startswith("Opticians - "): 74 properties["brand"] = "Boots Opticians" 75 properties["brand_wikidata"] = "Q4944037" 76 properties["name"] = properties["name"][12:] 77 78 yield GeojsonPointItem(**properties) 79 80 def parse(self, response): 81 urls = response.xpath( 82 '//div[@class="brand_list_viewer"]/div[@class="column"]/ul/li/a/@href' 83 ).extract() 84 for path in urls: 85 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores) 86 [end of locations/spiders/boots.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py --- a/locations/spiders/boots.py +++ b/locations/spiders/boots.py @@ -70,10 +70,10 @@ if hours: properties["opening_hours"] = hours - if properties["name"].startswith("Opticians - "): + if properties["name"].startswith("Opticians"): properties["brand"] = "Boots Opticians" properties["brand_wikidata"] = "Q4944037" - properties["name"] = properties["name"][12:] + properties["name"] = properties["name"].replace("Opticians", "").strip("- ") yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py\n--- a/locations/spiders/boots.py\n+++ b/locations/spiders/boots.py\n@@ -70,10 +70,10 @@\n if hours:\n properties[\"opening_hours\"] = hours\n \n- if properties[\"name\"].startswith(\"Opticians - \"):\n+ if properties[\"name\"].startswith(\"Opticians\"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n- properties[\"name\"] = properties[\"name\"][12:]\n+ properties[\"name\"] = properties[\"name\"].replace(\"Opticians\", \"\").strip(\"- \")\n \n yield GeojsonPointItem(**properties)\n", "issue": "boots.py spider doesn't correctly pick up all opticians\nThe current test in boots.py to switch the brand tags for opticians is `properties[\"name\"].startswith(\"Opticians - \")`:\r\nhttps://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73\r\n\r\nBut this is not general enough to catch all of them. The displayed name of some opticians branches only start with \"Opticians \" or \"Opticians-\". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu\r\n\r\nI think you could safely change the test to `properties[\"name\"].startswith(\"Opticians\")` but the code a few lines below to strip out the \"Opticians\" prefix would need to be more complicated.\n", "before_files": [{"content": "import scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass BootsSpider(scrapy.Spider):\n name = \"boots\"\n item_attributes = {\"brand\": \"Boots\", \"brand_wikidata\": \"Q6123139\"}\n allowed_domains = [\"www.boots.com\", \"www.boots.ie\"]\n download_delay = 0.5\n start_urls = [\"http://www.boots.com/store-a-z\", \"http://www.boots.ie/store-a-z\"]\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath(\n 'normalize-space(./td[@class=\"store_hours_day\"]/text())'\n ).extract_first()\n times = (\n li.xpath('normalize-space(./td[@class=\"store_hours_time\"]/text())')\n .extract_first()\n .replace(\" \", \"\")\n .replace(\"Closed-Closed\", \"off\")\n )\n if times and day:\n hours.append(day[:2] + \" \" + times)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n addr_full = response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][1]/dd[@class=\"store_info_list_item\"]/text()'\n ).extract()\n address = \", \".join(map(str.strip, addr_full))\n # Handle blank store pages e.g. 
https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx\n if len(address) == 0:\n return\n\n properties = {\n \"ref\": response.xpath(\n 'normalize-space(//input[@id=\"bootsStoreId\"]/@value)'\n ).extract_first(),\n \"name\": response.xpath(\n 'normalize-space(//input[@id=\"inputLocation\"][@name=\"inputLocation\"]/@value)'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//input[@id=\"storePostcode\"]/@value)'\n ).extract_first(),\n \"addr_full\": address,\n \"phone\": response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][3]/dd[@class=\"store_info_list_item\"]/a/text()'\n ).extract_first(),\n \"country\": response.xpath(\n 'normalize-space(//input[@id=\"countryCode\"][@name=\"countryCode\"]/@value)'\n ).extract_first(),\n \"website\": response.url,\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"lon\"]/@value)'\n ).extract_first(),\n }\n\n hours = self.parse_hours(\n response.xpath(\n '//div[@class=\"row store_all_opening_hours\"]/div[1]/table[@class=\"store_opening_hours \"]/tbody/tr'\n )\n )\n if hours:\n properties[\"opening_hours\"] = hours\n\n if properties[\"name\"].startswith(\"Opticians - \"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n properties[\"name\"] = properties[\"name\"][12:]\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"brand_list_viewer\"]/div[@class=\"column\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "path": "locations/spiders/boots.py"}]}
1,687
172
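The string handling in the diff above is compact, so here is a small standalone sketch of what it does to displayed store names. The helper name and the sample names are illustrative (the samples are modelled on the branches cited in the issue); only the `startswith`/`replace`/`strip` logic mirrors the patch.

```python
def split_optician_name(name):
    """Return (brand, cleaned name) for a raw Boots store name."""
    if name.startswith("Opticians"):
        return "Boots Opticians", name.replace("Opticians", "").strip("- ")
    return "Boots", name

for raw in ("Opticians - Tewkesbury High Street",
            "Opticians- Camden High Street",
            "Opticians Camden High Street",
            "Alnwick Paikes Street"):
    print(split_optician_name(raw))
# -> every "Opticians" variant maps to the Boots Opticians brand with the prefix
#    and any leading "- " stripped; plain stores are left unchanged.
```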
gh_patches_debug_384
rasdani/github-patches
git_diff
Gallopsled__pwntools-1811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> List comprehension in __all__ prevents Pylance from working Thanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody. If you've got an idea for a new feature, please provide information about: * What the feature does According to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules). https://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that. * Why the feature should exist To make Pylance happy :smile: * What tests should be included Test in VS Code to ensure it works. If you think you can write the feature yourself, please submit a Pull Request and we can review your changes! </issue> <code> [start of pwn/toplevel.py] 1 # Get all the modules from pwnlib 2 import collections 3 import logging 4 import math 5 import operator 6 import os 7 import platform 8 import re 9 import requests 10 import socks 11 import signal 12 import string 13 import struct 14 import subprocess 15 import sys 16 import tempfile 17 import threading 18 import time 19 20 from pprint import pprint 21 22 import pwnlib 23 from pwnlib import * 24 from pwnlib.asm import * 25 from pwnlib.context import Thread 26 from pwnlib.context import context, LocalContext 27 from pwnlib.dynelf import DynELF 28 from pwnlib.encoders import * 29 from pwnlib.elf.corefile import Core, Corefile, Coredump 30 from pwnlib.elf.elf import ELF, load 31 from pwnlib.encoders import * 32 from pwnlib.exception import PwnlibException 33 from pwnlib.gdb import attach, debug_assembly, debug_shellcode 34 from pwnlib.filepointer import * 35 from pwnlib.flag import * 36 from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split 37 from pwnlib.log import getLogger 38 from pwnlib.memleak import MemLeak, RelativeMemLeak 39 from pwnlib.regsort import * 40 from pwnlib.replacements import * 41 from pwnlib.rop import ROP 42 from pwnlib.rop.call import AppendedArgument 43 from pwnlib.rop.srop import SigreturnFrame 44 from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload 45 from pwnlib.runner import * 46 from pwnlib.term.readline import str_input 47 from pwnlib.timeout import Timeout 48 from pwnlib.tubes.listen import listen 49 from pwnlib.tubes.process import process, PTY, PIPE, STDOUT 50 from pwnlib.tubes.remote import remote, tcp, udp, connect 51 from pwnlib.tubes.serialtube import serialtube 52 from pwnlib.tubes.server import server 53 from pwnlib.tubes.ssh import ssh 54 from pwnlib.tubes.tube import tube 55 from pwnlib.ui import * 56 from pwnlib.util import crc 57 from pwnlib.util import iters 58 from pwnlib.util import net 59 from pwnlib.util import proc 60 from pwnlib.util import safeeval 61 from pwnlib.util.crc import BitPolynom 62 from pwnlib.util.cyclic import * 63 from pwnlib.util.fiddling import * 64 from pwnlib.util.getdents import * 65 from pwnlib.util.hashes import * 66 from pwnlib.util.lists import * 67 from pwnlib.util.misc import * 68 from pwnlib.util.packing import * 69 from pwnlib.util.proc import pidof 70 from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with 71 from 
pwnlib.util.splash import * 72 from pwnlib.util.web import * 73 74 # Promote these modules, so that "from pwn import *" will let you access them 75 76 from six.moves import cPickle as pickle, cStringIO as StringIO 77 from six import BytesIO 78 79 error = log.error 80 warning = log.warning 81 warn = log.warning 82 info = log.info 83 debug = log.debug 84 success = log.success 85 86 __all__ = [x for x in tuple(globals()) if x != '__name__'] 87 [end of pwn/toplevel.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwn/toplevel.py b/pwn/toplevel.py --- a/pwn/toplevel.py +++ b/pwn/toplevel.py @@ -83,4 +83,5 @@ debug = log.debug success = log.success -__all__ = [x for x in tuple(globals()) if x != '__name__'] +# Equivalence with the default behavior of "from import *" +# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
{"golden_diff": "diff --git a/pwn/toplevel.py b/pwn/toplevel.py\n--- a/pwn/toplevel.py\n+++ b/pwn/toplevel.py\n@@ -83,4 +83,5 @@\n debug = log.debug\n success = log.success\n \n-__all__ = [x for x in tuple(globals()) if x != '__name__']\n+# Equivalence with the default behavior of \"from import *\"\n+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "issue": "List comprehension in __all__ prevents Pylance from working\nThanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.\r\n\r\nIf you've got an idea for a new feature, please provide information about:\r\n\r\n* What the feature does\r\nAccording to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).\r\nhttps://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that.\r\n* Why the feature should exist\r\nTo make Pylance happy :smile: \r\n* What tests should be included\r\nTest in VS Code to ensure it works.\r\n\r\nIf you think you can write the feature yourself, please submit a Pull Request and we can review your changes!\r\n\n", "before_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom 
pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n__all__ = [x for x in tuple(globals()) if x != '__name__']\n", "path": "pwn/toplevel.py"}]}
1,679
109
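The patch above works by removing the runtime-computed `__all__` rather than replacing it, which is enough for static analysers: with no `__all__`, a star-import falls back to exporting every public name. The toy module below uses stand-in names, not pwntools code, and shows the two static-analysis-friendly alternatives: omit `__all__` entirely, or spell the export list out as a literal.

```python
# toy_toplevel.py -- illustrative module, not pwn/toplevel.py
error = print      # stand-ins for the helpers the real module promotes
warning = print

# Opaque to Pylance and similar tools, so "from toy_toplevel import *"
# appears to export nothing:
#   __all__ = [x for x in tuple(globals()) if x != '__name__']

# Statically analysable: either leave __all__ out (the patch keeps only a comment,
# restoring default star-import behaviour) or list the exports literally.
__all__ = ["error", "warning"]
```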
gh_patches_debug_14937
rasdani/github-patches
git_diff
microsoft__DeepSpeed-4918
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nv-sd CI test failure The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed. </issue> <code> [start of deepspeed/module_inject/containers/vae.py] 1 # Copyright (c) Microsoft Corporation. 2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 from ..policy import DSPolicy 7 from ...model_implementations.diffusers.vae import DSVAE 8 9 10 class VAEPolicy(DSPolicy): 11 12 def __init__(self): 13 super().__init__() 14 try: 15 import diffusers 16 if hasattr(diffusers.models.vae, "AutoencoderKL"): 17 self._orig_layer_class = diffusers.models.vae.AutoencoderKL 18 else: 19 # Diffusers >= 0.12.0 changes location of AutoencoderKL 20 self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL 21 except ImportError: 22 self._orig_layer_class = None 23 24 def match(self, module): 25 return isinstance(module, self._orig_layer_class) 26 27 def match_replaced(self, module): 28 return isinstance(module, DSVAE) 29 30 def apply(self, module, enable_cuda_graph=True): 31 # TODO(cmikeh2): Enable cuda graph should be an inference configuration 32 return DSVAE(module, enable_cuda_graph=enable_cuda_graph) 33 34 # NOTE (lekurile): Should we have a diffusers policy class? 35 def attention(self, client_module): 36 pass 37 [end of deepspeed/module_inject/containers/vae.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py --- a/deepspeed/module_inject/containers/vae.py +++ b/deepspeed/module_inject/containers/vae.py @@ -13,11 +13,11 @@ super().__init__() try: import diffusers - if hasattr(diffusers.models.vae, "AutoencoderKL"): - self._orig_layer_class = diffusers.models.vae.AutoencoderKL + if hasattr(diffusers.models.autoencoders.vae, "AutoencoderKL"): + self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL else: # Diffusers >= 0.12.0 changes location of AutoencoderKL - self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL + self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL except ImportError: self._orig_layer_class = None
{"golden_diff": "diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py\n--- a/deepspeed/module_inject/containers/vae.py\n+++ b/deepspeed/module_inject/containers/vae.py\n@@ -13,11 +13,11 @@\n super().__init__()\n try:\n import diffusers\n- if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n- self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n+ if hasattr(diffusers.models.autoencoders.vae, \"AutoencoderKL\"):\n+ self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n- self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n+ self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom ..policy import DSPolicy\nfrom ...model_implementations.diffusers.vae import DSVAE\n\n\nclass VAEPolicy(DSPolicy):\n\n def __init__(self):\n super().__init__()\n try:\n import diffusers\n if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n\n def match(self, module):\n return isinstance(module, self._orig_layer_class)\n\n def match_replaced(self, module):\n return isinstance(module, DSVAE)\n\n def apply(self, module, enable_cuda_graph=True):\n # TODO(cmikeh2): Enable cuda graph should be an inference configuration\n return DSVAE(module, enable_cuda_graph=enable_cuda_graph)\n\n # NOTE (lekurile): Should we have a diffusers policy class?\n def attention(self, client_module):\n pass\n", "path": "deepspeed/module_inject/containers/vae.py"}]}
921
229
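The golden diff above updates the class lookup because `AutoencoderKL` now lives under `diffusers.models.autoencoders` in newer diffusers releases. As a sketch of a more defensive variant of the same idea — not DeepSpeed's actual policy code — the helper below probes each module path named in the old and new code and returns the first class it finds; which path succeeds depends on the installed diffusers version.

```python
import importlib

def locate_autoencoder_kl():
    """Return diffusers' AutoencoderKL from whichever module layout is installed."""
    candidate_modules = (
        "diffusers.models.autoencoders.vae",             # layout targeted by the patch
        "diffusers.models.autoencoders.autoencoder_kl",  # newer fallback in the patch
        "diffusers.models.vae",                          # layouts the old code handled
        "diffusers.models.autoencoder_kl",
    )
    for path in candidate_modules:
        try:
            module = importlib.import_module(path)
        except ImportError:                              # path absent in this version
            continue
        if hasattr(module, "AutoencoderKL"):
            return module.AutoencoderKL
    return None                                          # diffusers missing or layout unknown
```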
gh_patches_debug_37365
rasdani/github-patches
git_diff
pantsbuild__pants-13583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Scala import extraction for inference Inference for Scala will require (at a minimum) import extraction from Scala sources. In v1 this was accomplished with https://scalameta.org/, which still seems to be active. https://scalameta.org/docs/trees/guide.html#parse-trees </issue> <code> [start of src/python/pants/backend/scala/dependency_inference/rules.py] 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 from __future__ import annotations 4 5 import logging 6 7 from pants.backend.scala.dependency_inference import scala_parser, symbol_mapper 8 from pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis 9 from pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem 10 from pants.backend.scala.target_types import ScalaSourceField 11 from pants.build_graph.address import Address 12 from pants.core.util_rules.source_files import SourceFilesRequest 13 from pants.engine.internals.selectors import Get, MultiGet 14 from pants.engine.rules import collect_rules, rule 15 from pants.engine.target import ( 16 Dependencies, 17 DependenciesRequest, 18 ExplicitlyProvidedDependencies, 19 InferDependenciesRequest, 20 InferredDependencies, 21 WrappedTarget, 22 ) 23 from pants.engine.unions import UnionRule 24 from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping 25 from pants.util.ordered_set import OrderedSet 26 27 logger = logging.getLogger(__name__) 28 29 30 class InferScalaSourceDependencies(InferDependenciesRequest): 31 infer_from = ScalaSourceField 32 33 34 @rule(desc="Inferring Scala dependencies by analyzing sources") 35 async def infer_scala_dependencies_via_source_analysis( 36 request: InferScalaSourceDependencies, 37 scala_infer_subsystem: ScalaInferSubsystem, 38 first_party_symbol_map: FirstPartySymbolMapping, 39 ) -> InferredDependencies: 40 if not scala_infer_subsystem.imports: 41 return InferredDependencies([]) 42 43 address = request.sources_field.address 44 wrapped_tgt = await Get(WrappedTarget, Address, address) 45 explicitly_provided_deps, analysis = await MultiGet( 46 Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])), 47 Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])), 48 ) 49 50 symbols: OrderedSet[str] = OrderedSet() 51 if scala_infer_subsystem.imports: 52 symbols.update(analysis.all_imports()) 53 54 dependencies: OrderedSet[Address] = OrderedSet() 55 for symbol in symbols: 56 matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol) 57 if not matches: 58 continue 59 60 explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference( 61 matches, 62 address, 63 import_reference="type", 64 context=f"The target {address} imports `{symbol}`", 65 ) 66 67 maybe_disambiguated = explicitly_provided_deps.disambiguated(matches) 68 if maybe_disambiguated: 69 dependencies.add(maybe_disambiguated) 70 71 return InferredDependencies(dependencies) 72 73 74 def rules(): 75 return [ 76 *collect_rules(), 77 *scala_parser.rules(), 78 *symbol_mapper.rules(), 79 UnionRule(InferDependenciesRequest, InferScalaSourceDependencies), 80 ] 81 [end of src/python/pants/backend/scala/dependency_inference/rules.py] [start of src/python/pants/backend/java/subsystems/java_infer.py] 1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 
2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 from typing import cast 4 5 from pants.option.subsystem import Subsystem 6 from pants.util.docutil import git_url 7 8 9 class JavaInferSubsystem(Subsystem): 10 options_scope = "java-infer" 11 help = "Options controlling which dependencies will be inferred for Java targets." 12 13 @classmethod 14 def register_options(cls, register): 15 super().register_options(register) 16 register( 17 "--imports", 18 default=True, 19 type=bool, 20 help=("Infer a target's dependencies by parsing import statements from sources."), 21 ) 22 register( 23 "--consumed-types", 24 default=True, 25 type=bool, 26 help=("Infer a target's dependencies by parsing consumed types from sources."), 27 ) 28 register( 29 "--third-party-imports", 30 default=True, 31 type=bool, 32 help="Infer a target's third-party dependencies using Java import statements.", 33 ) 34 _default_package_mapping_url = git_url( 35 "src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py" 36 ) 37 register( 38 "--third-party-import-mapping", 39 type=dict, 40 help=( 41 "A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) " 42 "without the version. The package path may be made recursive to match symbols in subpackages " 43 "by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `" 44 "to infer a dependency on junit:junit for any file importing a symbol from org.junit or its " 45 f"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url})." 46 ), 47 ) 48 49 @property 50 def imports(self) -> bool: 51 return cast(bool, self.options.imports) 52 53 @property 54 def consumed_types(self) -> bool: 55 return cast(bool, self.options.consumed_types) 56 57 @property 58 def third_party_imports(self) -> bool: 59 return cast(bool, self.options.third_party_imports) 60 61 @property 62 def third_party_import_mapping(self) -> dict: 63 return cast(dict, self.options.third_party_import_mapping) 64 [end of src/python/pants/backend/java/subsystems/java_infer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py --- a/src/python/pants/backend/java/subsystems/java_infer.py +++ b/src/python/pants/backend/java/subsystems/java_infer.py @@ -34,6 +34,7 @@ _default_package_mapping_url = git_url( "src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py" ) + # TODO: Move to `coursier` or a generic `jvm` subsystem. register( "--third-party-import-mapping", type=dict, diff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py --- a/src/python/pants/backend/scala/dependency_inference/rules.py +++ b/src/python/pants/backend/scala/dependency_inference/rules.py @@ -21,6 +21,12 @@ WrappedTarget, ) from pants.engine.unions import UnionRule +from pants.jvm.dependency_inference import artifact_mapper +from pants.jvm.dependency_inference.artifact_mapper import ( + AvailableThirdPartyArtifacts, + ThirdPartyPackageToArtifactMapping, + find_artifact_mapping, +) from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping from pants.util.ordered_set import OrderedSet @@ -36,6 +42,8 @@ request: InferScalaSourceDependencies, scala_infer_subsystem: ScalaInferSubsystem, first_party_symbol_map: FirstPartySymbolMapping, + third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping, + available_artifacts: AvailableThirdPartyArtifacts, ) -> InferredDependencies: if not scala_infer_subsystem.imports: return InferredDependencies([]) @@ -53,7 +61,11 @@ dependencies: OrderedSet[Address] = OrderedSet() for symbol in symbols: - matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol) + first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol) + third_party_matches = find_artifact_mapping( + symbol, third_party_artifact_mapping, available_artifacts + ) + matches = first_party_matches.union(third_party_matches) if not matches: continue @@ -74,6 +86,7 @@ def rules(): return [ *collect_rules(), + *artifact_mapper.rules(), *scala_parser.rules(), *symbol_mapper.rules(), UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),
{"golden_diff": "diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py\n--- a/src/python/pants/backend/java/subsystems/java_infer.py\n+++ b/src/python/pants/backend/java/subsystems/java_infer.py\n@@ -34,6 +34,7 @@\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n+ # TODO: Move to `coursier` or a generic `jvm` subsystem.\n register(\n \"--third-party-import-mapping\",\n type=dict,\ndiff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py\n--- a/src/python/pants/backend/scala/dependency_inference/rules.py\n+++ b/src/python/pants/backend/scala/dependency_inference/rules.py\n@@ -21,6 +21,12 @@\n WrappedTarget,\n )\n from pants.engine.unions import UnionRule\n+from pants.jvm.dependency_inference import artifact_mapper\n+from pants.jvm.dependency_inference.artifact_mapper import (\n+ AvailableThirdPartyArtifacts,\n+ ThirdPartyPackageToArtifactMapping,\n+ find_artifact_mapping,\n+)\n from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\n from pants.util.ordered_set import OrderedSet\n \n@@ -36,6 +42,8 @@\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n+ third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,\n+ available_artifacts: AvailableThirdPartyArtifacts,\n ) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n@@ -53,7 +61,11 @@\n \n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n- matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ third_party_matches = find_artifact_mapping(\n+ symbol, third_party_artifact_mapping, available_artifacts\n+ )\n+ matches = first_party_matches.union(third_party_matches)\n if not matches:\n continue\n \n@@ -74,6 +86,7 @@\n def rules():\n return [\n *collect_rules(),\n+ *artifact_mapper.rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n", "issue": "Scala import extraction for inference\nInference for Scala will require (at a minimum) import extraction from Scala sources. 
In v1 this was accomplished with https://scalameta.org/, which still seems to be active.\r\n\r\nhttps://scalameta.org/docs/trees/guide.html#parse-trees\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport logging\n\nfrom pants.backend.scala.dependency_inference import scala_parser, symbol_mapper\nfrom pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis\nfrom pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem\nfrom pants.backend.scala.target_types import ScalaSourceField\nfrom pants.build_graph.address import Address\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.engine.internals.selectors import Get, MultiGet\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.target import (\n Dependencies,\n DependenciesRequest,\n ExplicitlyProvidedDependencies,\n InferDependenciesRequest,\n InferredDependencies,\n WrappedTarget,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\nfrom pants.util.ordered_set import OrderedSet\n\nlogger = logging.getLogger(__name__)\n\n\nclass InferScalaSourceDependencies(InferDependenciesRequest):\n infer_from = ScalaSourceField\n\n\n@rule(desc=\"Inferring Scala dependencies by analyzing sources\")\nasync def infer_scala_dependencies_via_source_analysis(\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n\n address = request.sources_field.address\n wrapped_tgt = await Get(WrappedTarget, Address, address)\n explicitly_provided_deps, analysis = await MultiGet(\n Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),\n Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),\n )\n\n symbols: OrderedSet[str] = OrderedSet()\n if scala_infer_subsystem.imports:\n symbols.update(analysis.all_imports())\n\n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n if not matches:\n continue\n\n explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(\n matches,\n address,\n import_reference=\"type\",\n context=f\"The target {address} imports `{symbol}`\",\n )\n\n maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)\n if maybe_disambiguated:\n dependencies.add(maybe_disambiguated)\n\n return InferredDependencies(dependencies)\n\n\ndef rules():\n return [\n *collect_rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n ]\n", "path": "src/python/pants/backend/scala/dependency_inference/rules.py"}, {"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom typing import cast\n\nfrom pants.option.subsystem import Subsystem\nfrom pants.util.docutil import git_url\n\n\nclass JavaInferSubsystem(Subsystem):\n options_scope = \"java-infer\"\n help = \"Options controlling which dependencies will be inferred for Java targets.\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n 
register(\n \"--imports\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing import statements from sources.\"),\n )\n register(\n \"--consumed-types\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing consumed types from sources.\"),\n )\n register(\n \"--third-party-imports\",\n default=True,\n type=bool,\n help=\"Infer a target's third-party dependencies using Java import statements.\",\n )\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n register(\n \"--third-party-import-mapping\",\n type=dict,\n help=(\n \"A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) \"\n \"without the version. The package path may be made recursive to match symbols in subpackages \"\n \"by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `\"\n \"to infer a dependency on junit:junit for any file importing a symbol from org.junit or its \"\n f\"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url}).\"\n ),\n )\n\n @property\n def imports(self) -> bool:\n return cast(bool, self.options.imports)\n\n @property\n def consumed_types(self) -> bool:\n return cast(bool, self.options.consumed_types)\n\n @property\n def third_party_imports(self) -> bool:\n return cast(bool, self.options.third_party_imports)\n\n @property\n def third_party_import_mapping(self) -> dict:\n return cast(dict, self.options.third_party_import_mapping)\n", "path": "src/python/pants/backend/java/subsystems/java_infer.py"}]}
1,998
572
gh_patches_debug_9049
rasdani/github-patches
git_diff
avocado-framework__avocado-714
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Proper simple tests examples Even though simple tests are, well, simple, let's have a couple of them in the examples directory. A big reason for that is that we currently use wrappers as the simple tests examples in the Getting Started guide (`avocado list examples/wrappers`) which can be confusing to new users. </issue> <code> [start of setup.py] 1 #!/bin/env python 2 # This program is free software; you can redistribute it and/or modify 3 # it under the terms of the GNU General Public License as published by 4 # the Free Software Foundation; either version 2 of the License, or 5 # (at your option) any later version. 6 # 7 # This program is distributed in the hope that it will be useful, 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 10 # 11 # See LICENSE for more details. 12 # 13 # Copyright: Red Hat Inc. 2013-2014 14 # Author: Lucas Meneghel Rodrigues <[email protected]> 15 16 import glob 17 import os 18 # pylint: disable=E0611 19 20 from distutils.core import setup 21 22 from avocado import VERSION 23 24 25 VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ 26 27 28 def get_dir(system_path=None, virtual_path=None): 29 """ 30 Retrieve VIRTUAL_ENV friendly path 31 :param system_path: Relative system path 32 :param virtual_path: Overrides system_path for virtual_env only 33 :return: VIRTUAL_ENV friendly path 34 """ 35 if virtual_path is None: 36 virtual_path = system_path 37 if VIRTUAL_ENV: 38 if virtual_path is None: 39 virtual_path = [] 40 return os.path.join(*virtual_path) 41 else: 42 if system_path is None: 43 system_path = [] 44 return os.path.join(*(['/'] + system_path)) 45 46 47 def get_tests_dir(): 48 return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests']) 49 50 51 def get_avocado_libexec_dir(): 52 if VIRTUAL_ENV: 53 return get_dir(['libexec']) 54 elif os.path.exists('/usr/libexec'): # RHEL-like distro 55 return get_dir(['usr', 'libexec', 'avocado']) 56 else: # Debian-like distro 57 return get_dir(['usr', 'lib', 'avocado']) 58 59 60 def get_data_files(): 61 data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])] 62 data_files += [(get_dir(['etc', 'avocado', 'conf.d']), 63 ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])] 64 data_files += [(get_dir(['etc', 'avocado', 'sysinfo']), 65 ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files', 66 'etc/avocado/sysinfo/profilers'])] 67 data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))] 68 for data_dir in glob.glob('examples/tests/*.data'): 69 fmt_str = '%s/*' % data_dir 70 for f in glob.glob(fmt_str): 71 data_files += [(os.path.join(get_tests_dir(), 72 os.path.basename(data_dir)), [f])] 73 data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']), 74 ['man/avocado.rst', 'man/avocado-rest-client.rst'])) 75 data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'], 76 ['wrappers']), 77 glob.glob('examples/wrappers/*.sh'))] 78 data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*'))) 79 return data_files 80 81 82 def _get_plugin_resource_files(path): 83 """ 84 Given a path, return all the files in there to package 85 """ 86 flist = [] 87 for root, _, files in sorted(os.walk(path)): 88 for name in files: 89 fullname = os.path.join(root, name) 90 flist.append(fullname[len('avocado/core/plugins/'):]) 91 return flist 92 93 94 def get_long_description(): 95 with open('README.rst', 
'r') as req: 96 req_contents = req.read() 97 return req_contents 98 99 if __name__ == '__main__': 100 setup(name='avocado', 101 version=VERSION, 102 description='Avocado Test Framework', 103 long_description=get_long_description(), 104 author='Avocado Developers', 105 author_email='[email protected]', 106 url='http://avocado-framework.github.io/', 107 packages=['avocado', 108 'avocado.core', 109 'avocado.core.plugins', 110 'avocado.utils', 111 'avocado.utils.external', 112 'avocado.core.remote', 113 'avocado.core.restclient', 114 'avocado.core.restclient.cli', 115 'avocado.core.restclient.cli.args', 116 'avocado.core.restclient.cli.actions'], 117 package_data={'avocado.core.plugins': _get_plugin_resource_files( 118 'avocado/core/plugins/resources')}, 119 data_files=get_data_files(), 120 scripts=['scripts/avocado', 121 'scripts/avocado-rest-client']) 122 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -75,6 +75,11 @@ data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'], ['wrappers']), glob.glob('examples/wrappers/*.sh'))] + + data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'], + ['simpletests']), + glob.glob('examples/simpletests/*.sh'))] + data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*'))) return data_files
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,6 +75,11 @@\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n+\n+ data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],\n+ ['simpletests']),\n+ glob.glob('examples/simpletests/*.sh'))]\n+\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n", "issue": "Proper simple tests examples\nEven though simple tests are, well, simple, let's have a couple of them in the examples directory.\n\nA big reason for that is that we currently use wrappers as the simple tests examples in the Getting Started guide (`avocado list examples/wrappers`) which can be confusing to new users.\n\n", "before_files": [{"content": "#!/bin/env python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\nimport glob\nimport os\n# pylint: disable=E0611\n\nfrom distutils.core import setup\n\nfrom avocado import VERSION\n\n\nVIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ\n\n\ndef get_dir(system_path=None, virtual_path=None):\n \"\"\"\n Retrieve VIRTUAL_ENV friendly path\n :param system_path: Relative system path\n :param virtual_path: Overrides system_path for virtual_env only\n :return: VIRTUAL_ENV friendly path\n \"\"\"\n if virtual_path is None:\n virtual_path = system_path\n if VIRTUAL_ENV:\n if virtual_path is None:\n virtual_path = []\n return os.path.join(*virtual_path)\n else:\n if system_path is None:\n system_path = []\n return os.path.join(*(['/'] + system_path))\n\n\ndef get_tests_dir():\n return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])\n\n\ndef get_avocado_libexec_dir():\n if VIRTUAL_ENV:\n return get_dir(['libexec'])\n elif os.path.exists('/usr/libexec'): # RHEL-like distro\n return get_dir(['usr', 'libexec', 'avocado'])\n else: # Debian-like distro\n return get_dir(['usr', 'lib', 'avocado'])\n\n\ndef get_data_files():\n data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'conf.d']),\n ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),\n ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',\n 'etc/avocado/sysinfo/profilers'])]\n data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]\n for data_dir in glob.glob('examples/tests/*.data'):\n fmt_str = '%s/*' % data_dir\n for f in glob.glob(fmt_str):\n data_files += [(os.path.join(get_tests_dir(),\n os.path.basename(data_dir)), [f])]\n data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),\n ['man/avocado.rst', 'man/avocado-rest-client.rst']))\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n\n\ndef _get_plugin_resource_files(path):\n \"\"\"\n Given a path, return all the files in there 
to package\n \"\"\"\n flist = []\n for root, _, files in sorted(os.walk(path)):\n for name in files:\n fullname = os.path.join(root, name)\n flist.append(fullname[len('avocado/core/plugins/'):])\n return flist\n\n\ndef get_long_description():\n with open('README.rst', 'r') as req:\n req_contents = req.read()\n return req_contents\n\nif __name__ == '__main__':\n setup(name='avocado',\n version=VERSION,\n description='Avocado Test Framework',\n long_description=get_long_description(),\n author='Avocado Developers',\n author_email='[email protected]',\n url='http://avocado-framework.github.io/',\n packages=['avocado',\n 'avocado.core',\n 'avocado.core.plugins',\n 'avocado.utils',\n 'avocado.utils.external',\n 'avocado.core.remote',\n 'avocado.core.restclient',\n 'avocado.core.restclient.cli',\n 'avocado.core.restclient.cli.args',\n 'avocado.core.restclient.cli.actions'],\n package_data={'avocado.core.plugins': _get_plugin_resource_files(\n 'avocado/core/plugins/resources')},\n data_files=get_data_files(),\n scripts=['scripts/avocado',\n 'scripts/avocado-rest-client'])\n", "path": "setup.py"}]}
1,881
137
gh_patches_debug_42010
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-3037
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG]: 用3张A100显卡使用examples的opt demo会爆显存 ### 🐛 Describe the bug 使用的模型是opt-66b,使用的是80gb显存的A100显卡 使用的脚本bash ./run_gemini.sh 1 0 66b 3 我们直接跑脚本之后,在加载预训练模型时,3张显卡会同时加到80gb,然后报CUDA out of memory 后来改成模型加载到内存的时候使用了400gb的内存,然后爆另外的错误 Detected CUDA files, patching ldflags Emitting ninja build file /root/.cache/colossalai/torch_extensions/torch1.10_cu11.3/build.ninja... Building extension module fused_optim... Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) ninja: no work to do. Loading extension module fused_optim... Time to load fused_optim op: 1.2893073558807373 seconds [02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step [02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step [02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step INFO colossalai - colossalai - INFO: Found overflow. Skip step INFO colossalai - colossalai - INFO: Found overflow. Skip step INFO colossalai - colossalai - INFO: Found overflow. Skip step INFO colossalai - colossalai - INFO: train_gemini_opt.py:205 main INFO colossalai - colossalai - INFO: step 0 finished, Tflops 25.13966826409652 WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 505485 closing signal SIGTERM WARNING:torch.distributed.elastic.multiprocessing.api:Sending process 505487 closing signal SIGTERM WARNING:torch.distributed.elastic.multiprocessing.api:Unable to shutdown process 505487 via 15, forcefully exitting via 9 ERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: -9) local_rank: 1 (pid: 505486) of binary: /root/miniconda3/bin/python Traceback (most recent call last): File "/root/miniconda3/bin/torchrun", line 8, in <module> sys.exit(main()) File "/root/miniconda3/lib/python3.8/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 345, in wrapper return f(*args, **kwargs) File "/root/miniconda3/lib/python3.8/site-packages/torch/distributed/run.py", line 719, in main run(args) File "/root/miniconda3/lib/python3.8/site-packages/torch/distributed/run.py", line 710, in run elastic_launch( File "/root/miniconda3/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 131, in __call__ return launch_agent(self._config, self._entrypoint, list(args)) File "/root/miniconda3/lib/python3.8/site-packages/torch/distributed/launcher/api.py", line 259, in launch_agent raise ChildFailedError( torch.distributed.elastic.multiprocessing.errors.ChildFailedError: ======================================================= train_gemini_opt.py FAILED ------------------------------------------------------- ### Environment _No response_ [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of examples/language/opt/train_gemini_opt.py] 1 #!/usr/bin/env python 2 # coding=utf-8 3 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 
7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 """ 17 Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) 18 on a text file or a dataset without using HuggingFace Trainer. 19 20 Here is the full list of checkpoints on the hub that can be fine-tuned by this script: 21 https://huggingface.co/models?filter=text-generation 22 """ 23 # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 24 25 import time 26 from functools import partial 27 28 import datasets 29 import torch 30 import torch.distributed as dist 31 import transformers 32 from transformers import CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, OPTForCausalLM 33 from transformers.utils.versions import require_version 34 35 import colossalai 36 from colossalai.logging import disable_existing_loggers, get_dist_logger 37 from colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer 38 from colossalai.nn.parallel import GeminiDDP 39 from colossalai.utils import get_current_device 40 from colossalai.utils.model.colo_init_context import ColoInitContext 41 42 43 def get_data(batch_size, seq_len, vocab_size): 44 input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device()) 45 attention_mask = torch.ones_like(input_ids) 46 return input_ids, attention_mask 47 48 49 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") 50 51 MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) 52 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) 53 54 55 def get_time_stamp(): 56 torch.cuda.synchronize() 57 return time.time() 58 59 60 def get_tflops(model_numel, batch_size, seq_len, step_time): 61 return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12) 62 63 64 def parse_args(): 65 parser = colossalai.get_default_parser() 66 parser.add_argument( 67 "--model_name_or_path", 68 type=str, 69 help="Path to pretrained model or model identifier from huggingface.co/models.", 70 required=True, 71 ) 72 parser.add_argument( 73 "--config_name", 74 type=str, 75 default=None, 76 help="Pretrained config name or path if not the same as model_name", 77 ) 78 parser.add_argument( 79 "--batch_size", 80 type=int, 81 default=8, 82 help="Batch size (per dp group) for the training dataloader.", 83 ) 84 parser.add_argument( 85 "--learning_rate", 86 type=float, 87 default=5e-5, 88 help="Initial learning rate (after the potential warmup period) to use.", 89 ) 90 parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") 91 parser.add_argument( 92 "--max_train_steps", 93 type=int, 94 default=20, 95 help="Total number of training steps to perform.", 96 ) 97 parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") 98 parser.add_argument( 99 "--model_type", 100 type=str, 101 default=None, 102 help="Model type to use if training from scratch.", 103 choices=MODEL_TYPES, 104 ) 105 parser.add_argument("--mem_cap", type=int, default=0, help="use mem cap") 106 parser.add_argument("--init_in_cpu", action='store_true', 
default=False, help="init training model in cpu") 107 args = parser.parse_args() 108 109 return args 110 111 112 def colo_memory_cap(size_in_GB): 113 from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device 114 cuda_capacity = colo_device_memory_capacity(get_current_device()) 115 if size_in_GB * (1024**3) < cuda_capacity: 116 colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity) 117 print("Using {} GB of GPU memory".format(size_in_GB)) 118 119 120 def main(): 121 args = parse_args() 122 disable_existing_loggers() 123 colossalai.launch_from_torch({}) 124 logger = get_dist_logger() 125 is_main_process = dist.get_rank() == 0 126 127 if is_main_process: 128 datasets.utils.logging.set_verbosity_warning() 129 transformers.utils.logging.set_verbosity_info() 130 else: 131 datasets.utils.logging.set_verbosity_error() 132 transformers.utils.logging.set_verbosity_error() 133 134 if args.mem_cap > 0: 135 colo_memory_cap(args.mem_cap) 136 137 # If passed along, set the training seed now. 138 if args.seed is not None: 139 torch.mannul_seed(args.seed) 140 logger.info(f"Rank {dist.get_rank()}: random seed is set to {args.seed}") 141 142 # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at 143 # https://huggingface.co/docs/datasets/loading_datasets.html. 144 145 # Load pretrained model 146 # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently 147 # download model & vocab. 148 if args.config_name: 149 config = AutoConfig.from_pretrained(args.config_name) 150 elif args.model_name_or_path: 151 config = AutoConfig.from_pretrained(args.model_name_or_path) 152 else: 153 config = CONFIG_MAPPING[args.model_type]() 154 logger.warning("You are instantiating a new config instance from scratch.") 155 logger.info("Model config has been created", ranks=[0]) 156 157 if args.init_in_cpu: 158 init_dev = torch.device('cpu') 159 else: 160 init_dev = get_current_device() 161 162 # build model 163 if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b': 164 # currently, there has a bug in pretrained opt-13b 165 # we can not import it until huggingface fix it 166 logger.info("Train a new model from scratch", ranks=[0]) 167 with ColoInitContext(device=init_dev, dtype=torch.half): 168 model = OPTForCausalLM(config) 169 else: 170 logger.info("Finetune a pre-trained model", ranks=[0]) 171 with ColoInitContext(device=init_dev, dtype=torch.half): 172 model = OPTForCausalLM.from_pretrained(args.model_name_or_path, 173 from_tf=bool(".ckpt" in args.model_name_or_path), 174 config=config, 175 local_files_only=False) 176 177 # enable graident checkpointing 178 model.gradient_checkpointing_enable() 179 180 numel = sum([p.numel() for p in model.parameters()]) 181 PLACEMENT_POLICY = 'cpu' 182 model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, pin_memory=True) 183 optimizer = GeminiAdamOptimizer(model, lr=args.learning_rate, initial_scale=2**14, gpu_margin_mem_ratio=0.0) 184 185 SEQ_LEN = 1024 186 VOCAB_SIZE = 50257 187 188 get_tflops_func = partial(get_tflops, numel, args.batch_size, SEQ_LEN) 189 190 model.train() 191 for step in range(args.max_train_steps): 192 st_time = time.time() 193 input_ids, attn_mask = get_data(args.batch_size, SEQ_LEN, VOCAB_SIZE) 194 195 outputs = model(input_ids=input_ids, attention_mask=attn_mask, labels=input_ids, use_cache=False) 196 loss = outputs['loss'] 
197 optimizer.backward(loss) 198 199 optimizer.step() 200 optimizer.zero_grad() 201 torch.cuda.synchronize() 202 step_time = time.time() - st_time 203 step_tflops = get_tflops_func(step_time) 204 205 logger.info("step {} finished, Tflops {}".format(step, step_tflops), ranks=[0]) 206 207 logger.info("Training finished", ranks=[0]) 208 209 210 if __name__ == "__main__": 211 main() 212 [end of examples/language/opt/train_gemini_opt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/language/opt/train_gemini_opt.py b/examples/language/opt/train_gemini_opt.py --- a/examples/language/opt/train_gemini_opt.py +++ b/examples/language/opt/train_gemini_opt.py @@ -39,6 +39,8 @@ from colossalai.utils import get_current_device from colossalai.utils.model.colo_init_context import ColoInitContext +from colossalai.tensor import ProcessGroup, ShardSpec + def get_data(batch_size, seq_len, vocab_size): input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device()) @@ -102,6 +104,11 @@ help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) + parser.add_argument( + "--shardinit", + action="store_true", + help="Initialize the model with tensor parallel", + ) parser.add_argument("--mem_cap", type=int, default=0, help="use mem cap") parser.add_argument("--init_in_cpu", action='store_true', default=False, help="init training model in cpu") args = parser.parse_args() @@ -159,16 +166,30 @@ else: init_dev = get_current_device() + # shard init prameters + if args.shardinit: + logger.info("Sharding initialization !", ranks=[0]) + else: + logger.info("Skipping sharding initialization", ranks=[0]) + + world_size = torch.distributed.get_world_size() + shard_pg = ProcessGroup(tp_degree=world_size) if args.shardinit else None + default_dist_spec = ShardSpec([-1], [world_size]) if args.shardinit else None + # build model if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b': # currently, there has a bug in pretrained opt-13b # we can not import it until huggingface fix it logger.info("Train a new model from scratch", ranks=[0]) - with ColoInitContext(device=init_dev, dtype=torch.half): + with ColoInitContext(device=init_dev, dtype=torch.half, + default_dist_spec=default_dist_spec, + default_pg=shard_pg): model = OPTForCausalLM(config) else: logger.info("Finetune a pre-trained model", ranks=[0]) - with ColoInitContext(device=init_dev, dtype=torch.half): + with ColoInitContext(device=init_dev, dtype=torch.half, + default_dist_spec=default_dist_spec, + default_pg=shard_pg): model = OPTForCausalLM.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, @@ -179,7 +200,8 @@ numel = sum([p.numel() for p in model.parameters()]) PLACEMENT_POLICY = 'cpu' - model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, pin_memory=True) + model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, + pin_memory=True, strict_ddp_mode=args.shardinit) optimizer = GeminiAdamOptimizer(model, lr=args.learning_rate, initial_scale=2**14, gpu_margin_mem_ratio=0.0) SEQ_LEN = 1024
{"golden_diff": "diff --git a/examples/language/opt/train_gemini_opt.py b/examples/language/opt/train_gemini_opt.py\n--- a/examples/language/opt/train_gemini_opt.py\n+++ b/examples/language/opt/train_gemini_opt.py\n@@ -39,6 +39,8 @@\n from colossalai.utils import get_current_device\n from colossalai.utils.model.colo_init_context import ColoInitContext\n \n+from colossalai.tensor import ProcessGroup, ShardSpec\n+\n \n def get_data(batch_size, seq_len, vocab_size):\n input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device())\n@@ -102,6 +104,11 @@\n help=\"Model type to use if training from scratch.\",\n choices=MODEL_TYPES,\n )\n+ parser.add_argument(\n+ \"--shardinit\",\n+ action=\"store_true\",\n+ help=\"Initialize the model with tensor parallel\",\n+ )\n parser.add_argument(\"--mem_cap\", type=int, default=0, help=\"use mem cap\")\n parser.add_argument(\"--init_in_cpu\", action='store_true', default=False, help=\"init training model in cpu\")\n args = parser.parse_args()\n@@ -159,16 +166,30 @@\n else:\n init_dev = get_current_device()\n \n+ # shard init prameters\n+ if args.shardinit:\n+ logger.info(\"Sharding initialization !\", ranks=[0])\n+ else:\n+ logger.info(\"Skipping sharding initialization\", ranks=[0])\n+\n+ world_size = torch.distributed.get_world_size()\n+ shard_pg = ProcessGroup(tp_degree=world_size) if args.shardinit else None\n+ default_dist_spec = ShardSpec([-1], [world_size]) if args.shardinit else None\n+\n # build model\n if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b':\n # currently, there has a bug in pretrained opt-13b\n # we can not import it until huggingface fix it\n logger.info(\"Train a new model from scratch\", ranks=[0])\n- with ColoInitContext(device=init_dev, dtype=torch.half):\n+ with ColoInitContext(device=init_dev, dtype=torch.half,\n+ default_dist_spec=default_dist_spec,\n+ default_pg=shard_pg):\n model = OPTForCausalLM(config)\n else:\n logger.info(\"Finetune a pre-trained model\", ranks=[0])\n- with ColoInitContext(device=init_dev, dtype=torch.half):\n+ with ColoInitContext(device=init_dev, dtype=torch.half,\n+ default_dist_spec=default_dist_spec,\n+ default_pg=shard_pg):\n model = OPTForCausalLM.from_pretrained(args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n@@ -179,7 +200,8 @@\n \n numel = sum([p.numel() for p in model.parameters()])\n PLACEMENT_POLICY = 'cpu'\n- model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, pin_memory=True)\n+ model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY,\n+ pin_memory=True, strict_ddp_mode=args.shardinit)\n optimizer = GeminiAdamOptimizer(model, lr=args.learning_rate, initial_scale=2**14, gpu_margin_mem_ratio=0.0)\n \n SEQ_LEN = 1024\n", "issue": "[BUG]: \u75283\u5f20A100\u663e\u5361\u4f7f\u7528examples\u7684opt demo\u4f1a\u7206\u663e\u5b58\n### \ud83d\udc1b Describe the bug\n\n\u4f7f\u7528\u7684\u6a21\u578b\u662fopt-66b\uff0c\u4f7f\u7528\u7684\u662f80gb\u663e\u5b58\u7684A100\u663e\u5361\r\n\u4f7f\u7528\u7684\u811a\u672cbash ./run_gemini.sh 1 0 66b 3\r\n\u6211\u4eec\u76f4\u63a5\u8dd1\u811a\u672c\u4e4b\u540e\uff0c\u5728\u52a0\u8f7d\u9884\u8bad\u7ec3\u6a21\u578b\u65f6\uff0c3\u5f20\u663e\u5361\u4f1a\u540c\u65f6\u52a0\u523080gb\uff0c\u7136\u540e\u62a5CUDA out of 
memory\r\n\u540e\u6765\u6539\u6210\u6a21\u578b\u52a0\u8f7d\u5230\u5185\u5b58\u7684\u65f6\u5019\u4f7f\u7528\u4e86400gb\u7684\u5185\u5b58\uff0c\u7136\u540e\u7206\u53e6\u5916\u7684\u9519\u8bef\r\nDetected CUDA files, patching ldflags\r\nEmitting ninja build file /root/.cache/colossalai/torch_extensions/torch1.10_cu11.3/build.ninja...\r\nBuilding extension module fused_optim...\r\nAllowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)\r\nninja: no work to do.\r\nLoading extension module fused_optim...\r\nTime to load fused_optim op: 1.2893073558807373 seconds\r\n[02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step \r\n[02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step \r\n[02/16/23 15:09:50] INFO colossalai - colossalai - INFO: /root/miniconda3/lib/python3.8/site-packages/colossalai/nn/optimizer/zero_optimizer.py:217 step \r\n INFO colossalai - colossalai - INFO: Found overflow. Skip step \r\n INFO colossalai - colossalai - INFO: Found overflow. Skip step \r\n INFO colossalai - colossalai - INFO: Found overflow. Skip step \r\n INFO colossalai - colossalai - INFO: train_gemini_opt.py:205 main \r\n INFO colossalai - colossalai - INFO: step 0 finished, Tflops 25.13966826409652 \r\nWARNING:torch.distributed.elastic.multiprocessing.api:Sending process 505485 closing signal SIGTERM\r\nWARNING:torch.distributed.elastic.multiprocessing.api:Sending process 505487 closing signal SIGTERM\r\nWARNING:torch.distributed.elastic.multiprocessing.api:Unable to shutdown process 505487 via 15, forcefully exitting via 9\r\nERROR:torch.distributed.elastic.multiprocessing.api:failed (exitcode: -9) local_rank: 1 (pid: 505486) of binary: /root/miniconda3/bin/python\r\nTraceback (most recent call last):\r\n File \"/root/miniconda3/bin/torchrun\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/root/miniconda3/lib/python3.8/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py\", line 345, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/root/miniconda3/lib/python3.8/site-packages/torch/distributed/run.py\", line 719, in main\r\n run(args)\r\n File \"/root/miniconda3/lib/python3.8/site-packages/torch/distributed/run.py\", line 710, in run\r\n elastic_launch(\r\n File \"/root/miniconda3/lib/python3.8/site-packages/torch/distributed/launcher/api.py\", line 131, in __call__\r\n return launch_agent(self._config, self._entrypoint, list(args))\r\n File \"/root/miniconda3/lib/python3.8/site-packages/torch/distributed/launcher/api.py\", line 259, in launch_agent\r\n raise ChildFailedError(\r\ntorch.distributed.elastic.multiprocessing.errors.ChildFailedError: \r\n=======================================================\r\ntrain_gemini_opt.py FAILED\r\n-------------------------------------------------------\r\n\r\n\n\n### Environment\n\n_No response_\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)\non a text file or a dataset without using HuggingFace Trainer.\n\nHere is the full list of checkpoints on the hub that can be fine-tuned by this script:\nhttps://huggingface.co/models?filter=text-generation\n\"\"\"\n# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.\n\nimport time\nfrom functools import partial\n\nimport datasets\nimport torch\nimport torch.distributed as dist\nimport transformers\nfrom transformers import CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, OPTForCausalLM\nfrom transformers.utils.versions import require_version\n\nimport colossalai\nfrom colossalai.logging import disable_existing_loggers, get_dist_logger\nfrom colossalai.nn.optimizer.gemini_optimizer import GeminiAdamOptimizer\nfrom colossalai.nn.parallel import GeminiDDP\nfrom colossalai.utils import get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\n\n\ndef get_data(batch_size, seq_len, vocab_size):\n input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device())\n attention_mask = torch.ones_like(input_ids)\n return input_ids, attention_mask\n\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/language-modeling/requirements.txt\")\n\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef get_time_stamp():\n torch.cuda.synchronize()\n return time.time()\n\n\ndef get_tflops(model_numel, batch_size, seq_len, step_time):\n return model_numel * batch_size * seq_len * 8 / 1e12 / (step_time + 1e-12)\n\n\ndef parse_args():\n parser = colossalai.get_default_parser()\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--config_name\",\n type=str,\n default=None,\n help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per dp group) for the training dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=20,\n help=\"Total number of training steps to perform.\",\n )\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=None,\n help=\"Model type to use if training from scratch.\",\n choices=MODEL_TYPES,\n )\n parser.add_argument(\"--mem_cap\", type=int, default=0, help=\"use mem 
cap\")\n parser.add_argument(\"--init_in_cpu\", action='store_true', default=False, help=\"init training model in cpu\")\n args = parser.parse_args()\n\n return args\n\n\ndef colo_memory_cap(size_in_GB):\n from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device\n cuda_capacity = colo_device_memory_capacity(get_current_device())\n if size_in_GB * (1024**3) < cuda_capacity:\n colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity)\n print(\"Using {} GB of GPU memory\".format(size_in_GB))\n\n\ndef main():\n args = parse_args()\n disable_existing_loggers()\n colossalai.launch_from_torch({})\n logger = get_dist_logger()\n is_main_process = dist.get_rank() == 0\n\n if is_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n if args.mem_cap > 0:\n colo_memory_cap(args.mem_cap)\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n torch.mannul_seed(args.seed)\n logger.info(f\"Rank {dist.get_rank()}: random seed is set to {args.seed}\")\n\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path)\n else:\n config = CONFIG_MAPPING[args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n logger.info(\"Model config has been created\", ranks=[0])\n\n if args.init_in_cpu:\n init_dev = torch.device('cpu')\n else:\n init_dev = get_current_device()\n\n # build model\n if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b':\n # currently, there has a bug in pretrained opt-13b\n # we can not import it until huggingface fix it\n logger.info(\"Train a new model from scratch\", ranks=[0])\n with ColoInitContext(device=init_dev, dtype=torch.half):\n model = OPTForCausalLM(config)\n else:\n logger.info(\"Finetune a pre-trained model\", ranks=[0])\n with ColoInitContext(device=init_dev, dtype=torch.half):\n model = OPTForCausalLM.from_pretrained(args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n local_files_only=False)\n\n # enable graident checkpointing\n model.gradient_checkpointing_enable()\n\n numel = sum([p.numel() for p in model.parameters()])\n PLACEMENT_POLICY = 'cpu'\n model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, pin_memory=True)\n optimizer = GeminiAdamOptimizer(model, lr=args.learning_rate, initial_scale=2**14, gpu_margin_mem_ratio=0.0)\n\n SEQ_LEN = 1024\n VOCAB_SIZE = 50257\n\n get_tflops_func = partial(get_tflops, numel, args.batch_size, SEQ_LEN)\n\n model.train()\n for step in range(args.max_train_steps):\n st_time = time.time()\n input_ids, attn_mask = get_data(args.batch_size, SEQ_LEN, VOCAB_SIZE)\n\n outputs = model(input_ids=input_ids, attention_mask=attn_mask, labels=input_ids, use_cache=False)\n loss = outputs['loss']\n optimizer.backward(loss)\n\n optimizer.step()\n optimizer.zero_grad()\n 
torch.cuda.synchronize()\n step_time = time.time() - st_time\n step_tflops = get_tflops_func(step_time)\n\n logger.info(\"step {} finished, Tflops {}\".format(step, step_tflops), ranks=[0])\n\n logger.info(\"Training finished\", ranks=[0])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/opt/train_gemini_opt.py"}]}
3,868
765
gh_patches_debug_38351
rasdani/github-patches
git_diff
meltano__meltano-6534
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Meltano lock fails on migrating to >2.0.0 when variant didnt previously exist We should update the log message and add a section to the migration docs to help users get variants added for plugins that didnt have a variant previously. For example Airflow locking fails because a variant isnt set for all installations prior to 2.0.0. The error message isnt totally clear in saying this and users will need to manually update their meltano.yml. Originally discussed in https://github.com/meltano/meltano/issues/6359#issuecomment-1175432513 > Additionally I'm getting Orchestrator 'airflow' variant 'original' is not known to Meltano. Variants: ['apache (default)'] from a default installation of Airflow. I know that its hard because when I originally installed it there wasnt a variant name for airflow but now there is. Is there a way for us to resolve that its the default variant? Maybe that wont work because the default could change, maybe using the discovery.yml when we think were migrating to >2.0.0? > > Or we could log it better and say something like "I notice youre migrating and you dont have a variant, go to the Migration guide and learn how to define a variant" and we write up the steps for locking post migration and adding a variant name: > > remove executable/namespace if defined > add variant using one available on MeltanoHub. We can list the mapping like airflow -> apache, dbt -> dbt-labs, etc. for plugins that just got variant names > run meltano lock --all cc @edgarrmondragon </issue> <code> [start of src/meltano/core/hub/client.py] 1 """Meltano Hub Client.""" 2 3 from __future__ import annotations 4 5 from typing import Any 6 7 import requests 8 from structlog.stdlib import get_logger 9 10 import meltano 11 from meltano.core.plugin import ( 12 BasePlugin, 13 PluginDefinition, 14 PluginRef, 15 PluginType, 16 Variant, 17 ) 18 from meltano.core.plugin.error import PluginNotFoundError 19 from meltano.core.plugin.factory import base_plugin_factory 20 from meltano.core.plugin_discovery_service import PluginRepository 21 from meltano.core.project import Project 22 from meltano.core.project_settings_service import ProjectSettingsService 23 24 from .schema import IndexedPlugin, VariantRef 25 26 logger = get_logger(__name__) 27 28 29 class HubPluginTypeNotFound(Exception): 30 """Raised when a Hub plugin type is not found.""" 31 32 def __init__(self, plugin_type: PluginType): 33 """Create a new HubPluginVariantNotFound. 34 35 Args: 36 plugin_type: The type of the plugin. 37 """ 38 self.plugin_type = plugin_type 39 40 def __str__(self) -> str: 41 """Return a string representation of the error. 42 43 Returns: 44 The string representation of the error. 45 """ 46 return "{type} is not supported in Meltano Hub. Available plugin types: {types}".format( 47 type=self.plugin_type.descriptor.capitalize(), 48 types=list(PluginType), 49 ) 50 51 52 class HubPluginVariantNotFound(Exception): 53 """Raised when a Hub plugin variant is not found.""" 54 55 def __init__( 56 self, 57 plugin_type: PluginType, 58 plugin: IndexedPlugin, 59 variant_name: str, 60 ): 61 """Create a new HubPluginVariantNotFound. 62 63 Args: 64 plugin_type: The type of the plugin. 65 plugin: The indexed plugin. 66 variant_name: The name of the variant that was not found. 
67 """ 68 self.plugin_type = plugin_type 69 self.plugin = plugin 70 self.variant_name = variant_name 71 72 def __str__(self) -> str: 73 """Return a string representation of the error. 74 75 Returns: 76 The string representation of the error. 77 """ 78 return "{type} '{name}' variant '{variant}' is not known to Meltano. Variants: {variant_labels}".format( 79 type=self.plugin_type.descriptor.capitalize(), 80 name=self.plugin.name, 81 variant=self.variant_name, 82 variant_labels=self.plugin.variant_labels, 83 ) 84 85 86 class MeltanoHubService(PluginRepository): 87 """PluginRepository implementation for the Meltano Hub.""" 88 89 def __init__(self, project: Project) -> None: 90 """Initialize the service. 91 92 Args: 93 project: The Meltano project. 94 """ 95 self.project = project 96 self.session = requests.Session() 97 self.session.headers.update( 98 { 99 "Accept": "application/json", 100 "User-Agent": f"Meltano/{meltano.__version__}", 101 } 102 ) 103 104 self.settings_service = ProjectSettingsService(self.project) 105 106 if self.settings_service.get("send_anonymous_usage_stats"): 107 project_id = self.settings_service.get("project_id") 108 109 self.session.headers["X-Project-ID"] = project_id 110 111 @property 112 def hub_api_url(self): 113 """Return the URL of the Hub API. 114 115 Returns: 116 The URL of the Hub API. 117 """ 118 hub_url = self.settings_service.get("hub_url") 119 return f"{hub_url}/meltano/api/v1" 120 121 def plugin_type_endpoint(self, plugin_type: PluginType) -> str: 122 """Return the list endpoint for the given plugin type. 123 124 Args: 125 plugin_type: The plugin type. 126 127 Returns: 128 The endpoint for the given plugin type. 129 """ 130 return f"{self.hub_api_url}/plugins/{plugin_type.value}/index" 131 132 def plugin_endpoint( 133 self, 134 plugin_type: PluginType, 135 plugin_name: str, 136 variant_name: str | None = None, 137 ) -> str: 138 """Return the resource endpoint for the given plugin. 139 140 Args: 141 plugin_type: The plugin type. 142 plugin_name: The plugin name. 143 variant_name: The plugin variant name. 144 145 Returns: 146 The endpoint for the given plugin type. 147 """ 148 url = f"{self.hub_api_url}/plugins/{plugin_type.value}/{plugin_name}" 149 if variant_name: 150 url = f"{url}--{variant_name}" 151 152 return url 153 154 def find_definition( 155 self, 156 plugin_type: PluginType, 157 plugin_name: str, 158 variant_name: str | None = None, 159 ) -> PluginDefinition: 160 """Find a locked plugin definition. 161 162 Args: 163 plugin_type: The plugin type. 164 plugin_name: The plugin name. 165 variant_name: The plugin variant name. 166 167 Returns: 168 The plugin definition. 169 170 Raises: 171 PluginNotFoundError: If the plugin definition could not be found. 172 HubPluginVariantNotFound: If the plugin variant could not be found. 
173 """ 174 plugins = self.get_plugins_of_type(plugin_type) 175 176 try: 177 plugin = plugins[plugin_name] 178 except KeyError as plugins_key_err: 179 raise PluginNotFoundError( 180 PluginRef(plugin_type, plugin_name) 181 ) from plugins_key_err 182 183 if variant_name is None or variant_name == Variant.DEFAULT_NAME: 184 variant_name = plugin.default_variant 185 186 try: 187 url = plugin.variants[variant_name].ref 188 except KeyError as variant_key_err: 189 raise HubPluginVariantNotFound( 190 plugin_type, plugin, variant_name 191 ) from variant_key_err 192 193 response = self.session.get(url) 194 195 try: 196 response.raise_for_status() 197 except requests.HTTPError as http_err: 198 logger.error( 199 "Can not retrieve plugin", 200 status_code=http_err.response.status_code, 201 error=http_err, 202 ) 203 raise PluginNotFoundError(PluginRef(plugin_type, plugin_name)) from http_err 204 205 return PluginDefinition(**response.json(), plugin_type=plugin_type) 206 207 def find_base_plugin( 208 self, 209 plugin_type: PluginType, 210 plugin_name: str, 211 variant: str | None = None, 212 ) -> BasePlugin: 213 """Get the base plugin for a project plugin. 214 215 Args: 216 plugin_type: The plugin type. 217 plugin_name: The plugin name. 218 variant: The plugin variant. 219 220 Returns: 221 The base plugin. 222 """ 223 plugin = self.find_definition( 224 plugin_type, 225 plugin_name, 226 variant_name=variant, 227 ) 228 229 return base_plugin_factory(plugin, plugin.variants[0]) 230 231 def get_plugins_of_type(self, plugin_type: PluginType) -> dict[str, IndexedPlugin]: 232 """Get all plugins of a given type. 233 234 Args: 235 plugin_type: The plugin type. 236 237 Returns: 238 The plugin definitions. 239 240 Raises: 241 HubPluginTypeNotFound: If the plugin type is not supported. 242 """ 243 if not plugin_type.discoverable: 244 return {} 245 246 url = self.plugin_type_endpoint(plugin_type) 247 response = self.session.get(url) 248 249 try: 250 response.raise_for_status() 251 except requests.HTTPError as err: 252 logger.error( 253 "Can not retrieve plugin type", 254 status_code=err.response.status_code, 255 error=err, 256 ) 257 raise HubPluginTypeNotFound(plugin_type) from err 258 259 plugins: dict[str, dict[str, Any]] = response.json() 260 return { 261 name: IndexedPlugin( 262 name, 263 logo_url=plugin["logo_url"], 264 default_variant=plugin["default_variant"], 265 variants={ 266 variant_name: VariantRef(variant_name, ref=variant["ref"]) 267 for variant_name, variant in plugin["variants"].items() 268 }, 269 ) 270 for name, plugin in plugins.items() 271 } 272 [end of src/meltano/core/hub/client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/meltano/core/hub/client.py b/src/meltano/core/hub/client.py --- a/src/meltano/core/hub/client.py +++ b/src/meltano/core/hub/client.py @@ -26,7 +26,7 @@ logger = get_logger(__name__) -class HubPluginTypeNotFound(Exception): +class HubPluginTypeNotFoundError(Exception): """Raised when a Hub plugin type is not found.""" def __init__(self, plugin_type: PluginType): @@ -49,7 +49,7 @@ ) -class HubPluginVariantNotFound(Exception): +class HubPluginVariantNotFoundError(Exception): """Raised when a Hub plugin variant is not found.""" def __init__( @@ -169,7 +169,7 @@ Raises: PluginNotFoundError: If the plugin definition could not be found. - HubPluginVariantNotFound: If the plugin variant could not be found. + HubPluginVariantNotFoundError: If the plugin variant could not be found. """ plugins = self.get_plugins_of_type(plugin_type) @@ -180,13 +180,16 @@ PluginRef(plugin_type, plugin_name) ) from plugins_key_err - if variant_name is None or variant_name == Variant.DEFAULT_NAME: + if variant_name is None or variant_name in { + Variant.DEFAULT_NAME, + Variant.ORIGINAL_NAME, + }: variant_name = plugin.default_variant try: url = plugin.variants[variant_name].ref except KeyError as variant_key_err: - raise HubPluginVariantNotFound( + raise HubPluginVariantNotFoundError( plugin_type, plugin, variant_name ) from variant_key_err @@ -238,7 +241,7 @@ The plugin definitions. Raises: - HubPluginTypeNotFound: If the plugin type is not supported. + HubPluginTypeNotFoundError: If the plugin type is not supported. """ if not plugin_type.discoverable: return {} @@ -254,7 +257,7 @@ status_code=err.response.status_code, error=err, ) - raise HubPluginTypeNotFound(plugin_type) from err + raise HubPluginTypeNotFoundError(plugin_type) from err plugins: dict[str, dict[str, Any]] = response.json() return {
{"golden_diff": "diff --git a/src/meltano/core/hub/client.py b/src/meltano/core/hub/client.py\n--- a/src/meltano/core/hub/client.py\n+++ b/src/meltano/core/hub/client.py\n@@ -26,7 +26,7 @@\n logger = get_logger(__name__)\n \n \n-class HubPluginTypeNotFound(Exception):\n+class HubPluginTypeNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin type is not found.\"\"\"\n \n def __init__(self, plugin_type: PluginType):\n@@ -49,7 +49,7 @@\n )\n \n \n-class HubPluginVariantNotFound(Exception):\n+class HubPluginVariantNotFoundError(Exception):\n \"\"\"Raised when a Hub plugin variant is not found.\"\"\"\n \n def __init__(\n@@ -169,7 +169,7 @@\n \n Raises:\n PluginNotFoundError: If the plugin definition could not be found.\n- HubPluginVariantNotFound: If the plugin variant could not be found.\n+ HubPluginVariantNotFoundError: If the plugin variant could not be found.\n \"\"\"\n plugins = self.get_plugins_of_type(plugin_type)\n \n@@ -180,13 +180,16 @@\n PluginRef(plugin_type, plugin_name)\n ) from plugins_key_err\n \n- if variant_name is None or variant_name == Variant.DEFAULT_NAME:\n+ if variant_name is None or variant_name in {\n+ Variant.DEFAULT_NAME,\n+ Variant.ORIGINAL_NAME,\n+ }:\n variant_name = plugin.default_variant\n \n try:\n url = plugin.variants[variant_name].ref\n except KeyError as variant_key_err:\n- raise HubPluginVariantNotFound(\n+ raise HubPluginVariantNotFoundError(\n plugin_type, plugin, variant_name\n ) from variant_key_err\n \n@@ -238,7 +241,7 @@\n The plugin definitions.\n \n Raises:\n- HubPluginTypeNotFound: If the plugin type is not supported.\n+ HubPluginTypeNotFoundError: If the plugin type is not supported.\n \"\"\"\n if not plugin_type.discoverable:\n return {}\n@@ -254,7 +257,7 @@\n status_code=err.response.status_code,\n error=err,\n )\n- raise HubPluginTypeNotFound(plugin_type) from err\n+ raise HubPluginTypeNotFoundError(plugin_type) from err\n \n plugins: dict[str, dict[str, Any]] = response.json()\n return {\n", "issue": "Meltano lock fails on migrating to >2.0.0 when variant didnt previously exist\nWe should update the log message and add a section to the migration docs to help users get variants added for plugins that didnt have a variant previously. For example Airflow locking fails because a variant isnt set for all installations prior to 2.0.0. The error message isnt totally clear in saying this and users will need to manually update their meltano.yml.\r\n\r\nOriginally discussed in https://github.com/meltano/meltano/issues/6359#issuecomment-1175432513\r\n\r\n> Additionally I'm getting Orchestrator 'airflow' variant 'original' is not known to Meltano. Variants: ['apache (default)'] from a default installation of Airflow. I know that its hard because when I originally installed it there wasnt a variant name for airflow but now there is. Is there a way for us to resolve that its the default variant? Maybe that wont work because the default could change, maybe using the discovery.yml when we think were migrating to >2.0.0?\r\n> \r\n> Or we could log it better and say something like \"I notice youre migrating and you dont have a variant, go to the Migration guide and learn how to define a variant\" and we write up the steps for locking post migration and adding a variant name:\r\n> \r\n> remove executable/namespace if defined\r\n> add variant using one available on MeltanoHub. We can list the mapping like airflow -> apache, dbt -> dbt-labs, etc. 
for plugins that just got variant names\r\n> run meltano lock --all\r\n\r\ncc @edgarrmondragon \n", "before_files": [{"content": "\"\"\"Meltano Hub Client.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any\n\nimport requests\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.plugin import (\n BasePlugin,\n PluginDefinition,\n PluginRef,\n PluginType,\n Variant,\n)\nfrom meltano.core.plugin.error import PluginNotFoundError\nfrom meltano.core.plugin.factory import base_plugin_factory\nfrom meltano.core.plugin_discovery_service import PluginRepository\nfrom meltano.core.project import Project\nfrom meltano.core.project_settings_service import ProjectSettingsService\n\nfrom .schema import IndexedPlugin, VariantRef\n\nlogger = get_logger(__name__)\n\n\nclass HubPluginTypeNotFound(Exception):\n \"\"\"Raised when a Hub plugin type is not found.\"\"\"\n\n def __init__(self, plugin_type: PluginType):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n \"\"\"\n self.plugin_type = plugin_type\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} is not supported in Meltano Hub. Available plugin types: {types}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n types=list(PluginType),\n )\n\n\nclass HubPluginVariantNotFound(Exception):\n \"\"\"Raised when a Hub plugin variant is not found.\"\"\"\n\n def __init__(\n self,\n plugin_type: PluginType,\n plugin: IndexedPlugin,\n variant_name: str,\n ):\n \"\"\"Create a new HubPluginVariantNotFound.\n\n Args:\n plugin_type: The type of the plugin.\n plugin: The indexed plugin.\n variant_name: The name of the variant that was not found.\n \"\"\"\n self.plugin_type = plugin_type\n self.plugin = plugin\n self.variant_name = variant_name\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the error.\n\n Returns:\n The string representation of the error.\n \"\"\"\n return \"{type} '{name}' variant '{variant}' is not known to Meltano. 
Variants: {variant_labels}\".format(\n type=self.plugin_type.descriptor.capitalize(),\n name=self.plugin.name,\n variant=self.variant_name,\n variant_labels=self.plugin.variant_labels,\n )\n\n\nclass MeltanoHubService(PluginRepository):\n \"\"\"PluginRepository implementation for the Meltano Hub.\"\"\"\n\n def __init__(self, project: Project) -> None:\n \"\"\"Initialize the service.\n\n Args:\n project: The Meltano project.\n \"\"\"\n self.project = project\n self.session = requests.Session()\n self.session.headers.update(\n {\n \"Accept\": \"application/json\",\n \"User-Agent\": f\"Meltano/{meltano.__version__}\",\n }\n )\n\n self.settings_service = ProjectSettingsService(self.project)\n\n if self.settings_service.get(\"send_anonymous_usage_stats\"):\n project_id = self.settings_service.get(\"project_id\")\n\n self.session.headers[\"X-Project-ID\"] = project_id\n\n @property\n def hub_api_url(self):\n \"\"\"Return the URL of the Hub API.\n\n Returns:\n The URL of the Hub API.\n \"\"\"\n hub_url = self.settings_service.get(\"hub_url\")\n return f\"{hub_url}/meltano/api/v1\"\n\n def plugin_type_endpoint(self, plugin_type: PluginType) -> str:\n \"\"\"Return the list endpoint for the given plugin type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n return f\"{self.hub_api_url}/plugins/{plugin_type.value}/index\"\n\n def plugin_endpoint(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> str:\n \"\"\"Return the resource endpoint for the given plugin.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The endpoint for the given plugin type.\n \"\"\"\n url = f\"{self.hub_api_url}/plugins/{plugin_type.value}/{plugin_name}\"\n if variant_name:\n url = f\"{url}--{variant_name}\"\n\n return url\n\n def find_definition(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant_name: str | None = None,\n ) -> PluginDefinition:\n \"\"\"Find a locked plugin definition.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant_name: The plugin variant name.\n\n Returns:\n The plugin definition.\n\n Raises:\n PluginNotFoundError: If the plugin definition could not be found.\n HubPluginVariantNotFound: If the plugin variant could not be found.\n \"\"\"\n plugins = self.get_plugins_of_type(plugin_type)\n\n try:\n plugin = plugins[plugin_name]\n except KeyError as plugins_key_err:\n raise PluginNotFoundError(\n PluginRef(plugin_type, plugin_name)\n ) from plugins_key_err\n\n if variant_name is None or variant_name == Variant.DEFAULT_NAME:\n variant_name = plugin.default_variant\n\n try:\n url = plugin.variants[variant_name].ref\n except KeyError as variant_key_err:\n raise HubPluginVariantNotFound(\n plugin_type, plugin, variant_name\n ) from variant_key_err\n\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as http_err:\n logger.error(\n \"Can not retrieve plugin\",\n status_code=http_err.response.status_code,\n error=http_err,\n )\n raise PluginNotFoundError(PluginRef(plugin_type, plugin_name)) from http_err\n\n return PluginDefinition(**response.json(), plugin_type=plugin_type)\n\n def find_base_plugin(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n variant: str | None = None,\n ) -> BasePlugin:\n \"\"\"Get the base plugin for a project plugin.\n\n Args:\n plugin_type: The plugin type.\n plugin_name: The plugin name.\n variant: 
The plugin variant.\n\n Returns:\n The base plugin.\n \"\"\"\n plugin = self.find_definition(\n plugin_type,\n plugin_name,\n variant_name=variant,\n )\n\n return base_plugin_factory(plugin, plugin.variants[0])\n\n def get_plugins_of_type(self, plugin_type: PluginType) -> dict[str, IndexedPlugin]:\n \"\"\"Get all plugins of a given type.\n\n Args:\n plugin_type: The plugin type.\n\n Returns:\n The plugin definitions.\n\n Raises:\n HubPluginTypeNotFound: If the plugin type is not supported.\n \"\"\"\n if not plugin_type.discoverable:\n return {}\n\n url = self.plugin_type_endpoint(plugin_type)\n response = self.session.get(url)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as err:\n logger.error(\n \"Can not retrieve plugin type\",\n status_code=err.response.status_code,\n error=err,\n )\n raise HubPluginTypeNotFound(plugin_type) from err\n\n plugins: dict[str, dict[str, Any]] = response.json()\n return {\n name: IndexedPlugin(\n name,\n logo_url=plugin[\"logo_url\"],\n default_variant=plugin[\"default_variant\"],\n variants={\n variant_name: VariantRef(variant_name, ref=variant[\"ref\"])\n for variant_name, variant in plugin[\"variants\"].items()\n },\n )\n for name, plugin in plugins.items()\n }\n", "path": "src/meltano/core/hub/client.py"}]}
3240
514
gh_patches_debug_23166
rasdani/github-patches
git_diff
Kinto__kinto-1219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Crash on DELETE /buckets when quota plugin is enabled Another collateral damage of #945 A `http DELETE /v1/buckets` when the quota plugin is enabled leads to: ``` File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py", line 598, in subscribers subscription(*objects) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 130, in subscriber_wrapper return derived_subscriber(*arg) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 103, in derived_subscriber return subscriber(arg[0]) File "/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py", line 47, in on_resource_changed bucket_id = payload['bucket_id'] KeyError: 'bucket_id' ``` Crash on DELETE /buckets when quota plugin is enabled Another collateral damage of #945 A `http DELETE /v1/buckets` when the quota plugin is enabled leads to: ``` File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py", line 598, in subscribers subscription(*objects) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 130, in subscriber_wrapper return derived_subscriber(*arg) File "/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py", line 103, in derived_subscriber return subscriber(arg[0]) File "/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py", line 47, in on_resource_changed bucket_id = payload['bucket_id'] KeyError: 'bucket_id' ``` </issue> <code> [start of kinto/plugins/quotas/listener.py] 1 import copy 2 3 from pyramid.httpexceptions import HTTPInsufficientStorage 4 from kinto.core.errors import http_error, ERRORS 5 from kinto.core.storage.exceptions import RecordNotFoundError 6 from kinto.core.utils import instance_uri 7 8 from .utils import record_size 9 10 11 QUOTA_RESOURCE_NAME = 'quota' 12 BUCKET_QUOTA_OBJECT_ID = 'bucket_info' 13 COLLECTION_QUOTA_OBJECT_ID = 'collection_info' 14 15 16 def get_bucket_settings(settings, bucket_id, name): 17 return settings.get( 18 # Bucket specific 19 'quotas.bucket_{}_{}'.format(bucket_id, name), 20 # Global to all buckets 21 settings.get('quotas.bucket_{}'.format(name), None)) 22 23 24 def get_collection_settings(settings, bucket_id, collection_id, name): 25 return settings.get( 26 # Specific for a given bucket collection 27 'quotas.collection_{}_{}_{}'.format(bucket_id, collection_id, name), 28 # Specific to given bucket collections 29 settings.get('quotas.collection_{}_{}'.format(bucket_id, name), 30 # Global to all buckets collections 31 settings.get('quotas.collection_{}'.format(name), None))) 32 33 34 def on_resource_changed(event): 35 """ 36 Everytime an object is created/changed/deleted, we update the 37 bucket counters. 38 39 If a new object exceeds the quotas, we reject the request. 
40 """ 41 payload = event.payload 42 action = payload['action'] 43 resource_name = payload['resource_name'] 44 event_uri = payload['uri'] 45 46 settings = event.request.registry.settings 47 48 bucket_id = payload['bucket_id'] 49 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id) 50 collection_id = None 51 collection_uri = None 52 if 'collection_id' in payload: 53 collection_id = payload['collection_id'] 54 collection_uri = instance_uri(event.request, 55 'collection', 56 bucket_id=bucket_id, 57 id=collection_id) 58 59 bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes') 60 bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items') 61 bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id, 62 'max_bytes_per_item') 63 collection_max_bytes = get_collection_settings(settings, bucket_id, 64 collection_id, 'max_bytes') 65 collection_max_items = get_collection_settings(settings, bucket_id, 66 collection_id, 'max_items') 67 collection_max_bytes_per_item = get_collection_settings( 68 settings, bucket_id, collection_id, 'max_bytes_per_item') 69 70 max_bytes_per_item = (collection_max_bytes_per_item or 71 bucket_max_bytes_per_item) 72 73 storage = event.request.registry.storage 74 75 if action == 'delete' and resource_name == 'bucket': 76 # Deleting a bucket already deletes everything underneath (including 77 # quotas info). See kinto/views/bucket. 78 return 79 80 targets = [] 81 for impacted in event.impacted_records: 82 target = impacted['new' if action != 'delete' else 'old'] 83 # On POST .../records, the URI does not contain the newly created 84 # record id. 85 obj_id = target['id'] 86 parts = event_uri.split('/') 87 if resource_name in parts[-1]: 88 parts.append(obj_id) 89 else: 90 # Make sure the id is correct on grouped events. 91 parts[-1] = obj_id 92 uri = '/'.join(parts) 93 94 old = impacted.get('old', {}) 95 new = impacted.get('new', {}) 96 97 targets.append((uri, obj_id, old, new)) 98 99 try: 100 bucket_info = copy.deepcopy( 101 storage.get(parent_id=bucket_uri, 102 collection_id=QUOTA_RESOURCE_NAME, 103 object_id=BUCKET_QUOTA_OBJECT_ID)) 104 except RecordNotFoundError: 105 bucket_info = { 106 "collection_count": 0, 107 "record_count": 0, 108 "storage_size": 0, 109 } 110 111 collection_info = { 112 "record_count": 0, 113 "storage_size": 0, 114 } 115 if collection_id: 116 try: 117 collection_info = copy.deepcopy( 118 storage.get(parent_id=collection_uri, 119 collection_id=QUOTA_RESOURCE_NAME, 120 object_id=COLLECTION_QUOTA_OBJECT_ID)) 121 except RecordNotFoundError: 122 pass 123 124 # Update the bucket quotas values for each impacted record. 
125 for (uri, obj_id, old, new) in targets: 126 old_size = record_size(old) 127 new_size = record_size(new) 128 129 if max_bytes_per_item is not None and action != "delete": 130 if new_size > max_bytes_per_item: 131 message = ("Maximum bytes per object exceeded " 132 "({} > {} Bytes.".format(new_size, max_bytes_per_item)) 133 raise http_error(HTTPInsufficientStorage(), 134 errno=ERRORS.FORBIDDEN.value, 135 message=message) 136 137 if action == 'create': 138 bucket_info['storage_size'] += new_size 139 if resource_name == 'collection': 140 bucket_info['collection_count'] += 1 141 collection_info['storage_size'] += new_size 142 if resource_name == 'record': 143 bucket_info['record_count'] += 1 144 collection_info['record_count'] += 1 145 collection_info['storage_size'] += new_size 146 elif action == 'update': 147 bucket_info['storage_size'] -= old_size 148 bucket_info['storage_size'] += new_size 149 if resource_name in ('collection', 'record'): 150 collection_info['storage_size'] -= old_size 151 collection_info['storage_size'] += new_size 152 else: # action == 'delete': 153 bucket_info['storage_size'] -= old_size 154 if resource_name == 'collection': 155 collection_uri = uri 156 bucket_info['collection_count'] -= 1 157 # When we delete the collection all the records in it 158 # are deleted without notification. 159 collection_records, _ = storage.get_all( 160 collection_id='record', 161 parent_id=collection_uri) 162 for r in collection_records: 163 old_record_size = record_size(r) 164 bucket_info['record_count'] -= 1 165 bucket_info['storage_size'] -= old_record_size 166 collection_info['record_count'] -= 1 167 collection_info['storage_size'] -= old_record_size 168 collection_info['storage_size'] -= old_size 169 170 if resource_name == 'record': 171 bucket_info['record_count'] -= 1 172 collection_info['record_count'] -= 1 173 collection_info['storage_size'] -= old_size 174 175 if bucket_max_bytes is not None: 176 if bucket_info['storage_size'] > bucket_max_bytes: 177 message = ("Bucket maximum total size exceeded " 178 "({} > {} Bytes). 
".format(bucket_info['storage_size'], 179 bucket_max_bytes)) 180 raise http_error(HTTPInsufficientStorage(), 181 errno=ERRORS.FORBIDDEN.value, 182 message=message) 183 184 if bucket_max_items is not None: 185 if bucket_info['record_count'] > bucket_max_items: 186 message = ("Bucket maximum number of objects exceeded " 187 "({} > {} objects).".format(bucket_info['record_count'], 188 bucket_max_items)) 189 raise http_error(HTTPInsufficientStorage(), 190 errno=ERRORS.FORBIDDEN.value, 191 message=message) 192 193 if collection_max_bytes is not None: 194 if collection_info['storage_size'] > collection_max_bytes: 195 message = ("Collection maximum size exceeded " 196 "({} > {} Bytes).".format(collection_info['storage_size'], 197 collection_max_bytes)) 198 raise http_error(HTTPInsufficientStorage(), 199 errno=ERRORS.FORBIDDEN.value, 200 message=message) 201 202 if collection_max_items is not None: 203 if collection_info['record_count'] > collection_max_items: 204 message = ("Collection maximum number of objects exceeded " 205 "({} > {} objects).".format(collection_info['record_count'], 206 collection_max_items)) 207 raise http_error(HTTPInsufficientStorage(), 208 errno=ERRORS.FORBIDDEN.value, 209 message=message) 210 211 storage.update(parent_id=bucket_uri, 212 collection_id=QUOTA_RESOURCE_NAME, 213 object_id=BUCKET_QUOTA_OBJECT_ID, 214 record=bucket_info) 215 216 if collection_id: 217 if action == 'delete' and resource_name == 'collection': 218 # Deleting a collection already deletes everything underneath 219 # (including quotas info). See kinto/views/collection. 220 return 221 else: 222 storage.update(parent_id=collection_uri, 223 collection_id=QUOTA_RESOURCE_NAME, 224 object_id=COLLECTION_QUOTA_OBJECT_ID, 225 record=collection_info) 226 [end of kinto/plugins/quotas/listener.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/plugins/quotas/listener.py b/kinto/plugins/quotas/listener.py --- a/kinto/plugins/quotas/listener.py +++ b/kinto/plugins/quotas/listener.py @@ -41,10 +41,15 @@ payload = event.payload action = payload['action'] resource_name = payload['resource_name'] - event_uri = payload['uri'] + + if action == 'delete' and resource_name == 'bucket': + # Deleting a bucket already deletes everything underneath (including + # quotas info). See kinto/views/bucket. + return settings = event.request.registry.settings + event_uri = payload['uri'] bucket_id = payload['bucket_id'] bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id) collection_id = None @@ -72,11 +77,6 @@ storage = event.request.registry.storage - if action == 'delete' and resource_name == 'bucket': - # Deleting a bucket already deletes everything underneath (including - # quotas info). See kinto/views/bucket. - return - targets = [] for impacted in event.impacted_records: target = impacted['new' if action != 'delete' else 'old']
{"golden_diff": "diff --git a/kinto/plugins/quotas/listener.py b/kinto/plugins/quotas/listener.py\n--- a/kinto/plugins/quotas/listener.py\n+++ b/kinto/plugins/quotas/listener.py\n@@ -41,10 +41,15 @@\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n- event_uri = payload['uri']\n+\n+ if action == 'delete' and resource_name == 'bucket':\n+ # Deleting a bucket already deletes everything underneath (including\n+ # quotas info). See kinto/views/bucket.\n+ return\n \n settings = event.request.registry.settings\n \n+ event_uri = payload['uri']\n bucket_id = payload['bucket_id']\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n collection_id = None\n@@ -72,11 +77,6 @@\n \n storage = event.request.registry.storage\n \n- if action == 'delete' and resource_name == 'bucket':\n- # Deleting a bucket already deletes everything underneath (including\n- # quotas info). See kinto/views/bucket.\n- return\n-\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new' if action != 'delete' else 'old']\n", "issue": "Crash on DELETE /buckets when quota plugin is enabled\nAnother collateral damage of #945 \r\n\r\nA `http DELETE /v1/buckets` when the quota plugin is enabled leads to:\r\n\r\n```\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py\", line 598, in subscribers\r\n subscription(*objects)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 130, in subscriber_wrapper\r\n return derived_subscriber(*arg)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 103, in derived_subscriber\r\n return subscriber(arg[0])\r\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py\", line 47, in on_resource_changed\r\n bucket_id = payload['bucket_id']\r\nKeyError: 'bucket_id'\r\n\r\n```\nCrash on DELETE /buckets when quota plugin is enabled\nAnother collateral damage of #945 \r\n\r\nA `http DELETE /v1/buckets` when the quota plugin is enabled leads to:\r\n\r\n```\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/zope/interface/adapter.py\", line 598, in subscribers\r\n subscription(*objects)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 130, in subscriber_wrapper\r\n return derived_subscriber(*arg)\r\n File \"/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.5/site-packages/pyramid/config/adapters.py\", line 103, in derived_subscriber\r\n return subscriber(arg[0])\r\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/plugins/quotas/listener.py\", line 47, in on_resource_changed\r\n bucket_id = payload['bucket_id']\r\nKeyError: 'bucket_id'\r\n\r\n```\n", "before_files": [{"content": "import copy\n\nfrom pyramid.httpexceptions import HTTPInsufficientStorage\nfrom kinto.core.errors import http_error, ERRORS\nfrom kinto.core.storage.exceptions import RecordNotFoundError\nfrom kinto.core.utils import instance_uri\n\nfrom .utils import record_size\n\n\nQUOTA_RESOURCE_NAME = 'quota'\nBUCKET_QUOTA_OBJECT_ID = 'bucket_info'\nCOLLECTION_QUOTA_OBJECT_ID = 'collection_info'\n\n\ndef get_bucket_settings(settings, bucket_id, name):\n return settings.get(\n # Bucket specific\n 'quotas.bucket_{}_{}'.format(bucket_id, name),\n # Global to all buckets\n settings.get('quotas.bucket_{}'.format(name), None))\n\n\ndef get_collection_settings(settings, bucket_id, 
collection_id, name):\n return settings.get(\n # Specific for a given bucket collection\n 'quotas.collection_{}_{}_{}'.format(bucket_id, collection_id, name),\n # Specific to given bucket collections\n settings.get('quotas.collection_{}_{}'.format(bucket_id, name),\n # Global to all buckets collections\n settings.get('quotas.collection_{}'.format(name), None)))\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we update the\n bucket counters.\n\n If a new object exceeds the quotas, we reject the request.\n \"\"\"\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n settings = event.request.registry.settings\n\n bucket_id = payload['bucket_id']\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n collection_id = None\n collection_uri = None\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n bucket_max_bytes = get_bucket_settings(settings, bucket_id, 'max_bytes')\n bucket_max_items = get_bucket_settings(settings, bucket_id, 'max_items')\n bucket_max_bytes_per_item = get_bucket_settings(settings, bucket_id,\n 'max_bytes_per_item')\n collection_max_bytes = get_collection_settings(settings, bucket_id,\n collection_id, 'max_bytes')\n collection_max_items = get_collection_settings(settings, bucket_id,\n collection_id, 'max_items')\n collection_max_bytes_per_item = get_collection_settings(\n settings, bucket_id, collection_id, 'max_bytes_per_item')\n\n max_bytes_per_item = (collection_max_bytes_per_item or\n bucket_max_bytes_per_item)\n\n storage = event.request.registry.storage\n\n if action == 'delete' and resource_name == 'bucket':\n # Deleting a bucket already deletes everything underneath (including\n # quotas info). 
See kinto/views/bucket.\n return\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new' if action != 'delete' else 'old']\n # On POST .../records, the URI does not contain the newly created\n # record id.\n obj_id = target['id']\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n old = impacted.get('old', {})\n new = impacted.get('new', {})\n\n targets.append((uri, obj_id, old, new))\n\n try:\n bucket_info = copy.deepcopy(\n storage.get(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n bucket_info = {\n \"collection_count\": 0,\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n\n collection_info = {\n \"record_count\": 0,\n \"storage_size\": 0,\n }\n if collection_id:\n try:\n collection_info = copy.deepcopy(\n storage.get(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID))\n except RecordNotFoundError:\n pass\n\n # Update the bucket quotas values for each impacted record.\n for (uri, obj_id, old, new) in targets:\n old_size = record_size(old)\n new_size = record_size(new)\n\n if max_bytes_per_item is not None and action != \"delete\":\n if new_size > max_bytes_per_item:\n message = (\"Maximum bytes per object exceeded \"\n \"({} > {} Bytes.\".format(new_size, max_bytes_per_item))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if action == 'create':\n bucket_info['storage_size'] += new_size\n if resource_name == 'collection':\n bucket_info['collection_count'] += 1\n collection_info['storage_size'] += new_size\n if resource_name == 'record':\n bucket_info['record_count'] += 1\n collection_info['record_count'] += 1\n collection_info['storage_size'] += new_size\n elif action == 'update':\n bucket_info['storage_size'] -= old_size\n bucket_info['storage_size'] += new_size\n if resource_name in ('collection', 'record'):\n collection_info['storage_size'] -= old_size\n collection_info['storage_size'] += new_size\n else: # action == 'delete':\n bucket_info['storage_size'] -= old_size\n if resource_name == 'collection':\n collection_uri = uri\n bucket_info['collection_count'] -= 1\n # When we delete the collection all the records in it\n # are deleted without notification.\n collection_records, _ = storage.get_all(\n collection_id='record',\n parent_id=collection_uri)\n for r in collection_records:\n old_record_size = record_size(r)\n bucket_info['record_count'] -= 1\n bucket_info['storage_size'] -= old_record_size\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_record_size\n collection_info['storage_size'] -= old_size\n\n if resource_name == 'record':\n bucket_info['record_count'] -= 1\n collection_info['record_count'] -= 1\n collection_info['storage_size'] -= old_size\n\n if bucket_max_bytes is not None:\n if bucket_info['storage_size'] > bucket_max_bytes:\n message = (\"Bucket maximum total size exceeded \"\n \"({} > {} Bytes). 
\".format(bucket_info['storage_size'],\n bucket_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if bucket_max_items is not None:\n if bucket_info['record_count'] > bucket_max_items:\n message = (\"Bucket maximum number of objects exceeded \"\n \"({} > {} objects).\".format(bucket_info['record_count'],\n bucket_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_bytes is not None:\n if collection_info['storage_size'] > collection_max_bytes:\n message = (\"Collection maximum size exceeded \"\n \"({} > {} Bytes).\".format(collection_info['storage_size'],\n collection_max_bytes))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n if collection_max_items is not None:\n if collection_info['record_count'] > collection_max_items:\n message = (\"Collection maximum number of objects exceeded \"\n \"({} > {} objects).\".format(collection_info['record_count'],\n collection_max_items))\n raise http_error(HTTPInsufficientStorage(),\n errno=ERRORS.FORBIDDEN.value,\n message=message)\n\n storage.update(parent_id=bucket_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=BUCKET_QUOTA_OBJECT_ID,\n record=bucket_info)\n\n if collection_id:\n if action == 'delete' and resource_name == 'collection':\n # Deleting a collection already deletes everything underneath\n # (including quotas info). See kinto/views/collection.\n return\n else:\n storage.update(parent_id=collection_uri,\n collection_id=QUOTA_RESOURCE_NAME,\n object_id=COLLECTION_QUOTA_OBJECT_ID,\n record=collection_info)\n", "path": "kinto/plugins/quotas/listener.py"}]}
3380
286
gh_patches_debug_228
rasdani/github-patches
git_diff
mlcommons__GaNDLF-766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `gdown` does not seem to be working **Describe the bug** Current CI seems to be broken. **To Reproduce** Steps to reproduce the behavior: 1. Run any CI test 2. See error: ```python-traceback [SNIP!] if gdrive_file_id and is_gdrive_download_link: content_disposition = six.moves.urllib_parse.unquote( res.headers["Content-Disposition"] ) m = re.search(r"filename\*=UTF-8''(.*)", content_disposition) > filename_from_url = m.groups()[0] E AttributeError: 'NoneType' object has no attribute 'groups' ``` Example: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219 **Expected behavior** The sample data file download should work. **Screenshots** N.A. **GaNDLF Version** Current master **Desktop (please complete the following information):** N.A. **Additional context** Basically, it is this error: https://github.com/wkentaro/gdown/issues/291 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 """The setup script.""" 4 5 6 import sys, re, os 7 from setuptools import setup, find_packages 8 from setuptools.command.install import install 9 from setuptools.command.develop import develop 10 from setuptools.command.egg_info import egg_info 11 12 try: 13 with open("README.md") as readme_file: 14 readme = readme_file.read() 15 except Exception as error: 16 readme = "No README information found." 17 sys.stderr.write( 18 "Warning: Could not open '%s' due %s\n" % ("README.md", error) 19 ) 20 21 22 class CustomInstallCommand(install): 23 def run(self): 24 install.run(self) 25 26 27 class CustomDevelopCommand(develop): 28 def run(self): 29 develop.run(self) 30 31 32 class CustomEggInfoCommand(egg_info): 33 def run(self): 34 egg_info.run(self) 35 36 37 try: 38 filepath = "GANDLF/version.py" 39 version_file = open(filepath) 40 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read()) 41 42 except Exception as error: 43 __version__ = "0.0.1" 44 sys.stderr.write( 45 "Warning: Could not open '%s' due %s\n" % (filepath, error) 46 ) 47 48 # Handle cases where specific files need to be bundled into the final package as installed via PyPI 49 dockerfiles = [ 50 item 51 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 52 if (os.path.isfile(item) and item.startswith("Dockerfile-")) 53 ] 54 entrypoint_files = [ 55 item 56 for item in os.listdir(os.path.dirname(os.path.abspath(__file__))) 57 if (os.path.isfile(item) and item.startswith("gandlf_")) 58 ] 59 setup_files = ["setup.py", ".dockerignore", "pyproject.toml", "MANIFEST.in"] 60 all_extra_files = dockerfiles + entrypoint_files + setup_files 61 all_extra_files_pathcorrected = [ 62 os.path.join("../", item) for item in all_extra_files 63 ] 64 # find_packages should only ever find these as subpackages of gandlf, not as top-level packages 65 # generate this dynamically? 
66 # GANDLF.GANDLF is needed to prevent recursion madness in deployments 67 toplevel_package_excludes = [ 68 "GANDLF.GANDLF", 69 "anonymize", 70 "cli", 71 "compute", 72 "data", 73 "grad_clipping", 74 "losses", 75 "metrics", 76 "models", 77 "optimizers", 78 "schedulers", 79 "utils", 80 ] 81 82 83 requirements = [ 84 "torch==2.1.0", 85 "black==23.11.0", 86 "numpy==1.25.0", 87 "scipy", 88 "SimpleITK!=2.0.*", 89 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536 90 "torchvision", 91 "tqdm", 92 "torchio==0.19.3", 93 "pandas>=2.0.0", 94 "scikit-learn>=0.23.2", 95 "scikit-image>=0.19.1", 96 "setuptools", 97 "seaborn", 98 "pyyaml", 99 "tiffslide", 100 "matplotlib", 101 "gdown", 102 "pytest", 103 "coverage", 104 "pytest-cov", 105 "psutil", 106 "medcam", 107 "opencv-python", 108 "torchmetrics==1.1.2", 109 "zarr==2.10.3", 110 "pydicom", 111 "onnx", 112 "torchinfo==1.7.0", 113 "segmentation-models-pytorch==0.3.3", 114 "ACSConv==0.1.1", 115 "docker", 116 "dicom-anonymizer", 117 "twine", 118 "zarr", 119 "keyring", 120 ] 121 122 if __name__ == "__main__": 123 setup( 124 name="GANDLF", 125 version=__version__, 126 author="MLCommons", 127 author_email="[email protected]", 128 python_requires=">3.8, <3.12", 129 packages=find_packages( 130 where=os.path.dirname(os.path.abspath(__file__)), 131 exclude=toplevel_package_excludes, 132 ), 133 cmdclass={ 134 "install": CustomInstallCommand, 135 "develop": CustomDevelopCommand, 136 "egg_info": CustomEggInfoCommand, 137 }, 138 scripts=[ 139 "gandlf_run", 140 "gandlf_constructCSV", 141 "gandlf_collectStats", 142 "gandlf_patchMiner", 143 "gandlf_preprocess", 144 "gandlf_anonymizer", 145 "gandlf_verifyInstall", 146 "gandlf_configGenerator", 147 "gandlf_recoverConfig", 148 "gandlf_deploy", 149 "gandlf_optimizeModel", 150 "gandlf_generateMetrics", 151 ], 152 classifiers=[ 153 "Development Status :: 3 - Alpha", 154 "Intended Audience :: Science/Research", 155 "License :: OSI Approved :: Apache Software License", 156 "Natural Language :: English", 157 "Operating System :: OS Independent", 158 "Programming Language :: Python :: 3.9", 159 "Programming Language :: Python :: 3.10", 160 "Programming Language :: Python :: 3.11", 161 "Topic :: Scientific/Engineering :: Medical Science Apps.", 162 ], 163 description=( 164 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging." 165 ), 166 install_requires=requirements, 167 license="Apache-2.0", 168 long_description=readme, 169 long_description_content_type="text/markdown", 170 include_package_data=True, 171 package_data={"GANDLF": all_extra_files_pathcorrected}, 172 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch", 173 zip_safe=False, 174 ) 175 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ "pyyaml", "tiffslide", "matplotlib", - "gdown", + "gdown==4.6.3", "pytest", "coverage", "pytest-cov",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -98,7 +98,7 @@\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n- \"gdown\",\n+ \"gdown==4.6.3\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n", "issue": "`gdown` does not seem to be working\n**Describe the bug**\r\nCurrent CI seems to be broken.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Run any CI test\r\n2. See error:\r\n```python-traceback\r\n[SNIP!]\r\n if gdrive_file_id and is_gdrive_download_link:\r\n content_disposition = six.moves.urllib_parse.unquote(\r\n res.headers[\"Content-Disposition\"]\r\n )\r\n m = re.search(r\"filename\\*=UTF-8''(.*)\", content_disposition)\r\n> filename_from_url = m.groups()[0]\r\nE AttributeError: 'NoneType' object has no attribute 'groups'\r\n```\r\nExample: https://github.com/mlcommons/GaNDLF/actions/runs/7489779631/job/20387346791?pr=764#step:9:219\r\n\r\n**Expected behavior**\r\nThe sample data file download should work.\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\nCurrent master\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nBasically, it is this error: https://github.com/wkentaro/gdown/issues/291\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n 
\"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]}
2494
79
gh_patches_debug_14956
rasdani/github-patches
git_diff
holoviz__panel-3803
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enable or document how to render options in a List Parameter as Literal elements ## Request Enable or document how to render options in a List Parameter as Literal elements ## Motivation I'm trying to show a user how to create a custom dropdown component using ReactiveHTML in https://discourse.holoviz.org/t/customize-panel-components-widgets-design/4187. There is already an example of a *child* template/ dropdown here https://panel.holoviz.org/user_guide/Custom_Components.html#child-templates Unfortunately the drop down options are not rendered as Literal values but instead as panels, i.e. wrapping the literal value in `bk` `div`s. In my example it makes the dropdown not look nice because a lot of margin is added. ![image](https://user-images.githubusercontent.com/42288570/188283341-7364227c-03d1-4aaf-ab07-b349acf4c214.png) ```python from panel.reactive import ReactiveHTML import param class ShoelaceSelect(ReactiveHTML): value = param.Parameter() options = param.List() _template = """ <sl-dropdown> <sl-button slot="trigger" caret>Dropdown</sl-button> <sl-menu> {% for obj in options %} <sl-menu-item id="option">${obj}</sl-menu-item> {% endfor %} </sl-menu> </sl-dropdown> """ __javascript_modules__=["https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/shoelace.js"] __css__ = ["https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/themes/light.css"] # _child_config = {'obj': 'literal'} import panel as pn pn.extension() select = ShoelaceSelect(options=["apple", "pear", "banana"]) pn.Column(select).servable() ``` I simple cannot find any documentation or example telling me how to provide these a literal values. </issue> <code> [start of panel/models/reactive_html.py] 1 import difflib 2 import re 3 4 from collections import defaultdict 5 from html.parser import HTMLParser 6 7 import bokeh.core.properties as bp 8 9 from bokeh.events import ModelEvent 10 from bokeh.model import DataModel 11 from bokeh.models import HTMLBox, LayoutDOM 12 13 endfor = '{%-? endfor -?%}' 14 list_iter_re = r'{%-? for (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*) -?%}' 15 items_iter_re = r'{%-? for \s*[A-Za-z_]\w*\s*, (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*)\.items\(\) -?%}' 16 values_iter_re = r'{%-? for (\s*[A-Za-z_]\w*\s*) in (\s*[A-Za-z_]\w*\s*)\.values\(\) -?%}' 17 18 19 class ReactiveHTMLParser(HTMLParser): 20 21 def __init__(self, cls, template=True): 22 super().__init__() 23 self.template = template 24 self.cls = cls 25 self.attrs = defaultdict(list) 26 self.children = {} 27 self.nodes = [] 28 self.looped = [] 29 self._template_re = re.compile(r'\$\{[^}]+\}') 30 self._literal_re = re.compile(r'\{\{[^}]+\}\}') 31 self._current_node = None 32 self._node_stack = [] 33 self._open_for = False 34 self.loop_map = {} 35 self.loop_var_map = defaultdict(list) 36 37 def handle_starttag(self, tag, attrs): 38 attrs = dict(attrs) 39 dom_id = attrs.pop('id', None) 40 self._current_node = None 41 self._node_stack.append((tag, dom_id)) 42 43 if not dom_id: 44 for attr, value in attrs.items(): 45 if value is None: 46 continue 47 params, methods = [], [] 48 for match in self._template_re.findall(value): 49 match = match[2:-1] 50 if match.startswith('model.'): 51 continue 52 if match in self.cls.param: 53 params.append(match) 54 elif hasattr(self.cls, match): 55 methods.append(match) 56 if methods: 57 raise ValueError( 58 "DOM nodes with an attached callback must declare " 59 f"an id. 
Found <{tag}> node with the `{attr}` callback " 60 f"referencing the `{methods[0]}` method. Add an id " 61 "attribute like this: " 62 f"<{tag} id=\"{tag}\" {attr}=\"${{{methods[0]}}}>...</{tag}>." 63 ) 64 elif params: 65 literal = value.replace(f'${{{params[0]}}}', f'{{{{{params[0]}}}}}') 66 raise ValueError( 67 "DOM node with a linked parameter declaration " 68 f"must declare an id. Found <{tag}> node with " 69 f"the `{attr}` attribute referencing the `{params[0]}` " 70 "parameter. Either declare an id on the node, " 71 f"i.e. <{tag} id=\"{tag}\" {attr}=\"{value}\">...</{tag}>, " 72 "or insert the value as a literal: " 73 f"<{tag} {attr}=\"{literal}\">...</{tag}>." 74 ) 75 return 76 77 if dom_id in self.nodes: 78 raise ValueError(f'Multiple DOM nodes with id="{dom_id}" found.') 79 self._current_node = dom_id 80 self.nodes.append(dom_id) 81 for attr, value in attrs.items(): 82 if value is None: 83 continue 84 matches = [] 85 for match in self._template_re.findall(value): 86 if not match[2:-1].startswith('model.'): 87 matches.append(match[2:-1]) 88 if matches: 89 self.attrs[dom_id].append((attr, matches, value.replace('${', '{'))) 90 91 def handle_endtag(self, tag): 92 self._node_stack.pop() 93 self._current_node = self._node_stack[-1][1] if self._node_stack else None 94 95 def handle_data(self, data): 96 if not self.template: 97 return 98 99 dom_id = self._current_node 100 matches = [] 101 for match in self._template_re.findall(data): 102 var = match[2:-1].strip() 103 if match[2:-1] not in self.loop_var_map[var]: 104 self.loop_var_map[var].append(match[2:-1]) 105 if var.endswith('.index0'): 106 matches.append('${%s }}]}' % var) 107 else: 108 matches.append('${%s}' % var) 109 110 literal_matches = [] 111 for match in self._literal_re.findall(data): 112 match = match[2:-2].strip() 113 if match.endswith('.index0'): 114 literal_matches.append('{{%s }}]}' % match) 115 else: 116 literal_matches.append('{{ %s }}' % match) 117 118 # Detect templating for loops 119 list_loop = re.findall(list_iter_re, data) 120 values_loop = re.findall(values_iter_re, data) 121 items_loop = re.findall(items_iter_re, data) 122 nloops = len(list_loop) + len(values_loop) + len(items_loop) 123 if nloops > 1 and nloops and self._open_for: 124 raise ValueError('Nested for loops currently not supported in templates.') 125 elif nloops: 126 loop = [loop for loop in (list_loop, values_loop, items_loop) if loop][0] 127 var, obj = loop[0] 128 if var in self.cls.param: 129 raise ValueError( 130 f'Loop variable {var} clashes with parameter name. ' 131 'Ensure loop variables have a unique name. Relevant ' 132 f'template section:\n\n{data}' 133 ) 134 self.loop_map[var] = obj 135 136 open_for = re.search(r'{%-? for', data) 137 end_for = re.search(endfor, data) 138 if open_for: 139 if self._current_node is None: 140 node = self._node_stack[-1][0] 141 raise ValueError( 142 'Loops may only be used inside a DOM node with an assigned ID. 
' 143 f'The following loop could not be expanded because the <{node}> node ' 144 f'did not have an assigned id:\n\n {data.strip()}' 145 ) 146 self._open_for = True 147 if end_for and (not nloops or end_for.start() > open_for.start()): 148 self._open_for = False 149 150 if self._current_node and literal_matches: 151 if len(literal_matches) == 1: 152 literal_match = literal_matches[0][2:-2].strip() 153 else: 154 literal_match = None 155 156 if literal_match and (literal_match in self.loop_map) and self._open_for: 157 literal_match = self.loop_map[literal_match] 158 self.looped.append((dom_id, literal_match)) 159 160 if not (self._current_node and matches): 161 return 162 163 if len(matches) == 1: 164 match = matches[0][2:-1].strip() 165 else: 166 for match in matches: 167 mode = self.cls._child_config.get(match, 'model') 168 if mode != 'template': 169 raise ValueError(f"Cannot match multiple variables in '{mode}' mode.") 170 match = None 171 172 # Handle looped variables 173 if match and (match in self.loop_map or '[' in match) and self._open_for: 174 if match in self.loop_map: 175 matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match] 176 match = self.loop_map[match] 177 elif '[' in match: 178 match, _ = match.split('[') 179 dom_id = dom_id.replace('-{{ loop.index0 }}', '') 180 self.looped.append((dom_id, match)) 181 182 mode = self.cls._child_config.get(match, 'model') 183 if match in self.cls.param and mode != 'template': 184 self.children[dom_id] = match 185 return 186 187 templates = [] 188 for match in matches: 189 match = match[2:-1] 190 if match.startswith('model.'): 191 continue 192 if match not in self.cls.param and '.' not in match: 193 params = difflib.get_close_matches(match, list(self.cls.param)) 194 raise ValueError(f"{self.cls.__name__} HTML template references " 195 f"unknown parameter '{match}', similar parameters " 196 f"include {params}.") 197 templates.append(match) 198 self.attrs[dom_id].append(('children', templates, data.replace('${', '{'))) 199 200 201 202 def find_attrs(html): 203 p = ReactiveHTMLParser() 204 p.feed(html) 205 return p.attrs 206 207 208 209 class DOMEvent(ModelEvent): 210 211 event_name = 'dom_event' 212 213 def __init__(self, model, node=None, data=None): 214 self.data = data 215 self.node = node 216 super().__init__(model=model) 217 218 219 class ReactiveHTML(HTMLBox): 220 221 attrs = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.List(bp.String), bp.String))) 222 223 callbacks = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.String))) 224 225 children = bp.Dict(bp.String, bp.Either(bp.List(bp.Either(bp.Instance(LayoutDOM), bp.String)), bp.String)) 226 227 data = bp.Instance(DataModel) 228 229 events = bp.Dict(bp.String, bp.Dict(bp.String, bp.Bool)) 230 231 html = bp.String() 232 233 looped = bp.List(bp.String) 234 235 nodes = bp.List(bp.String) 236 237 scripts = bp.Dict(bp.String, bp.List(bp.String)) 238 239 def __init__(self, **props): 240 if 'attrs' not in props and 'html' in props: 241 props['attrs'] = find_attrs(props['html']) 242 super().__init__(**props) 243 [end of panel/models/reactive_html.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/models/reactive_html.py b/panel/models/reactive_html.py --- a/panel/models/reactive_html.py +++ b/panel/models/reactive_html.py @@ -136,10 +136,11 @@ match = None # Handle looped variables - if match and (match in self.loop_map or '[' in match) and self._open_for: - if match in self.loop_map: - matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match] - match = self.loop_map[match] + if match and (match.strip() in self.loop_map or '[' in match) and self._open_for: + if match.strip() in self.loop_map: + loop_match = self.loop_map[match.strip()] + matches[matches.index('${%s}' % match)] = '${%s}' % loop_match + match = loop_match elif '[' in match: match, _ = match.split('[') dom_id = dom_id.replace('-{{ loop.index0 }}', '')
{"golden_diff": "diff --git a/panel/models/reactive_html.py b/panel/models/reactive_html.py\n--- a/panel/models/reactive_html.py\n+++ b/panel/models/reactive_html.py\n@@ -136,10 +136,11 @@\n match = None\n \n # Handle looped variables\n- if match and (match in self.loop_map or '[' in match) and self._open_for:\n- if match in self.loop_map:\n- matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match]\n- match = self.loop_map[match]\n+ if match and (match.strip() in self.loop_map or '[' in match) and self._open_for:\n+ if match.strip() in self.loop_map:\n+ loop_match = self.loop_map[match.strip()]\n+ matches[matches.index('${%s}' % match)] = '${%s}' % loop_match\n+ match = loop_match\n elif '[' in match:\n match, _ = match.split('[')\n dom_id = dom_id.replace('-{{ loop.index0 }}', '')\n", "issue": "Enable or document how to render options in a List Parameter as Literal elements\n## Request\r\n\r\nEnable or document how to render options in a List Parameter as Literal elements\r\n\r\n## Motivation\r\n\r\nI'm trying to show a user how to create a custom dropdown component using ReactiveHTML in https://discourse.holoviz.org/t/customize-panel-components-widgets-design/4187.\r\n\r\nThere is already an example of a *child* template/ dropdown here https://panel.holoviz.org/user_guide/Custom_Components.html#child-templates\r\n\r\nUnfortunately the drop down options are not rendered as Literal values but instead as panels, i.e. wrapping the literal value in `bk` `div`s.\r\n\r\nIn my example it makes the dropdown not look nice because a lot of margin is added.\r\n\r\n![image](https://user-images.githubusercontent.com/42288570/188283341-7364227c-03d1-4aaf-ab07-b349acf4c214.png)\r\n\r\n```python\r\nfrom panel.reactive import ReactiveHTML\r\nimport param\r\n\r\nclass ShoelaceSelect(ReactiveHTML):\r\n value = param.Parameter()\r\n options = param.List()\r\n _template = \"\"\"\r\n<sl-dropdown>\r\n <sl-button slot=\"trigger\" caret>Dropdown</sl-button>\r\n <sl-menu>\r\n {% for obj in options %}\r\n <sl-menu-item id=\"option\">${obj}</sl-menu-item>\r\n {% endfor %}\r\n </sl-menu>\r\n</sl-dropdown>\r\n\"\"\"\r\n\r\n __javascript_modules__=[\"https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/shoelace.js\"]\r\n __css__ = [\"https://cdn.jsdelivr.net/npm/@shoelace-style/[email protected]/dist/themes/light.css\"]\r\n # _child_config = {'obj': 'literal'}\r\n\r\nimport panel as pn\r\n\r\npn.extension()\r\n\r\nselect = ShoelaceSelect(options=[\"apple\", \"pear\", \"banana\"])\r\n\r\npn.Column(select).servable()\r\n```\r\n\r\nI simple cannot find any documentation or example telling me how to provide these a literal values.\n", "before_files": [{"content": "import difflib\nimport re\n\nfrom collections import defaultdict\nfrom html.parser import HTMLParser\n\nimport bokeh.core.properties as bp\n\nfrom bokeh.events import ModelEvent\nfrom bokeh.model import DataModel\nfrom bokeh.models import HTMLBox, LayoutDOM\n\nendfor = '{%-? endfor -?%}'\nlist_iter_re = r'{%-? for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*) -?%}'\nitems_iter_re = r'{%-? for \\s*[A-Za-z_]\\w*\\s*, (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.items\\(\\) -?%}'\nvalues_iter_re = r'{%-? 
for (\\s*[A-Za-z_]\\w*\\s*) in (\\s*[A-Za-z_]\\w*\\s*)\\.values\\(\\) -?%}'\n\n\nclass ReactiveHTMLParser(HTMLParser):\n\n def __init__(self, cls, template=True):\n super().__init__()\n self.template = template\n self.cls = cls\n self.attrs = defaultdict(list)\n self.children = {}\n self.nodes = []\n self.looped = []\n self._template_re = re.compile(r'\\$\\{[^}]+\\}')\n self._literal_re = re.compile(r'\\{\\{[^}]+\\}\\}')\n self._current_node = None\n self._node_stack = []\n self._open_for = False\n self.loop_map = {}\n self.loop_var_map = defaultdict(list)\n\n def handle_starttag(self, tag, attrs):\n attrs = dict(attrs)\n dom_id = attrs.pop('id', None)\n self._current_node = None\n self._node_stack.append((tag, dom_id))\n\n if not dom_id:\n for attr, value in attrs.items():\n if value is None:\n continue\n params, methods = [], []\n for match in self._template_re.findall(value):\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match in self.cls.param:\n params.append(match)\n elif hasattr(self.cls, match):\n methods.append(match)\n if methods:\n raise ValueError(\n \"DOM nodes with an attached callback must declare \"\n f\"an id. Found <{tag}> node with the `{attr}` callback \"\n f\"referencing the `{methods[0]}` method. Add an id \"\n \"attribute like this: \"\n f\"<{tag} id=\\\"{tag}\\\" {attr}=\\\"${{{methods[0]}}}>...</{tag}>.\"\n )\n elif params:\n literal = value.replace(f'${{{params[0]}}}', f'{{{{{params[0]}}}}}')\n raise ValueError(\n \"DOM node with a linked parameter declaration \"\n f\"must declare an id. Found <{tag}> node with \"\n f\"the `{attr}` attribute referencing the `{params[0]}` \"\n \"parameter. Either declare an id on the node, \"\n f\"i.e. <{tag} id=\\\"{tag}\\\" {attr}=\\\"{value}\\\">...</{tag}>, \"\n \"or insert the value as a literal: \"\n f\"<{tag} {attr}=\\\"{literal}\\\">...</{tag}>.\"\n )\n return\n\n if dom_id in self.nodes:\n raise ValueError(f'Multiple DOM nodes with id=\"{dom_id}\" found.')\n self._current_node = dom_id\n self.nodes.append(dom_id)\n for attr, value in attrs.items():\n if value is None:\n continue\n matches = []\n for match in self._template_re.findall(value):\n if not match[2:-1].startswith('model.'):\n matches.append(match[2:-1])\n if matches:\n self.attrs[dom_id].append((attr, matches, value.replace('${', '{')))\n\n def handle_endtag(self, tag):\n self._node_stack.pop()\n self._current_node = self._node_stack[-1][1] if self._node_stack else None\n\n def handle_data(self, data):\n if not self.template:\n return\n\n dom_id = self._current_node\n matches = []\n for match in self._template_re.findall(data):\n var = match[2:-1].strip()\n if match[2:-1] not in self.loop_var_map[var]:\n self.loop_var_map[var].append(match[2:-1])\n if var.endswith('.index0'):\n matches.append('${%s }}]}' % var)\n else:\n matches.append('${%s}' % var)\n\n literal_matches = []\n for match in self._literal_re.findall(data):\n match = match[2:-2].strip()\n if match.endswith('.index0'):\n literal_matches.append('{{%s }}]}' % match)\n else:\n literal_matches.append('{{ %s }}' % match)\n\n # Detect templating for loops\n list_loop = re.findall(list_iter_re, data)\n values_loop = re.findall(values_iter_re, data)\n items_loop = re.findall(items_iter_re, data)\n nloops = len(list_loop) + len(values_loop) + len(items_loop)\n if nloops > 1 and nloops and self._open_for:\n raise ValueError('Nested for loops currently not supported in templates.')\n elif nloops:\n loop = [loop for loop in (list_loop, values_loop, items_loop) if loop][0]\n var, obj = 
loop[0]\n if var in self.cls.param:\n raise ValueError(\n f'Loop variable {var} clashes with parameter name. '\n 'Ensure loop variables have a unique name. Relevant '\n f'template section:\\n\\n{data}'\n )\n self.loop_map[var] = obj\n\n open_for = re.search(r'{%-? for', data)\n end_for = re.search(endfor, data)\n if open_for:\n if self._current_node is None:\n node = self._node_stack[-1][0]\n raise ValueError(\n 'Loops may only be used inside a DOM node with an assigned ID. '\n f'The following loop could not be expanded because the <{node}> node '\n f'did not have an assigned id:\\n\\n {data.strip()}'\n )\n self._open_for = True\n if end_for and (not nloops or end_for.start() > open_for.start()):\n self._open_for = False\n\n if self._current_node and literal_matches:\n if len(literal_matches) == 1:\n literal_match = literal_matches[0][2:-2].strip()\n else:\n literal_match = None\n\n if literal_match and (literal_match in self.loop_map) and self._open_for:\n literal_match = self.loop_map[literal_match]\n self.looped.append((dom_id, literal_match))\n\n if not (self._current_node and matches):\n return\n\n if len(matches) == 1:\n match = matches[0][2:-1].strip()\n else:\n for match in matches:\n mode = self.cls._child_config.get(match, 'model')\n if mode != 'template':\n raise ValueError(f\"Cannot match multiple variables in '{mode}' mode.\")\n match = None\n\n # Handle looped variables\n if match and (match in self.loop_map or '[' in match) and self._open_for:\n if match in self.loop_map:\n matches[matches.index('${%s}' % match)] = '${%s}' % self.loop_map[match]\n match = self.loop_map[match]\n elif '[' in match:\n match, _ = match.split('[')\n dom_id = dom_id.replace('-{{ loop.index0 }}', '')\n self.looped.append((dom_id, match))\n\n mode = self.cls._child_config.get(match, 'model')\n if match in self.cls.param and mode != 'template':\n self.children[dom_id] = match\n return\n\n templates = []\n for match in matches:\n match = match[2:-1]\n if match.startswith('model.'):\n continue\n if match not in self.cls.param and '.' not in match:\n params = difflib.get_close_matches(match, list(self.cls.param))\n raise ValueError(f\"{self.cls.__name__} HTML template references \"\n f\"unknown parameter '{match}', similar parameters \"\n f\"include {params}.\")\n templates.append(match)\n self.attrs[dom_id].append(('children', templates, data.replace('${', '{')))\n\n\n\ndef find_attrs(html):\n p = ReactiveHTMLParser()\n p.feed(html)\n return p.attrs\n\n\n\nclass DOMEvent(ModelEvent):\n\n event_name = 'dom_event'\n\n def __init__(self, model, node=None, data=None):\n self.data = data\n self.node = node\n super().__init__(model=model)\n\n\nclass ReactiveHTML(HTMLBox):\n\n attrs = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.List(bp.String), bp.String)))\n\n callbacks = bp.Dict(bp.String, bp.List(bp.Tuple(bp.String, bp.String)))\n\n children = bp.Dict(bp.String, bp.Either(bp.List(bp.Either(bp.Instance(LayoutDOM), bp.String)), bp.String))\n\n data = bp.Instance(DataModel)\n\n events = bp.Dict(bp.String, bp.Dict(bp.String, bp.Bool))\n\n html = bp.String()\n\n looped = bp.List(bp.String)\n\n nodes = bp.List(bp.String)\n\n scripts = bp.Dict(bp.String, bp.List(bp.String))\n\n def __init__(self, **props):\n if 'attrs' not in props and 'html' in props:\n props['attrs'] = find_attrs(props['html'])\n super().__init__(**props)\n", "path": "panel/models/reactive_html.py"}]}
3,749
241
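The accepted fix above is narrow: the matched loop variable is normalised with `.strip()` before the `loop_map` lookup, so a match that still carries surrounding whitespace resolves to the parameter being iterated over (here `obj` maps to `options`), which is also the name `_child_config` is consulted with. A toy illustration of the lookup change only — the dictionary contents below are assumed, not taken from a real parse:

```python
# Hypothetical loop_map built while parsing "{% for obj in options %}".
loop_map = {"obj": "options"}

match = " obj "                    # a matched variable with stray whitespace
print(match in loop_map)           # False -> unpatched code fell back to 'model' mode
print(match.strip() in loop_map)   # True  -> patched code resolves to 'options'
```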
gh_patches_debug_19668
rasdani/github-patches
git_diff
docker__docker-py-1050
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> login failed with default registry I am using docker-py (1.8.0) and trying to using login API. If I don't input `registry='https://index.docker.io/v1/'` . It will raise exception as following: ``` docker.errors.APIError: 500 Server Error: Internal Server Error ("Unexpected status code [301] :") ``` But I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry. </issue> <code> [start of docker/api/daemon.py] 1 import os 2 import warnings 3 from datetime import datetime 4 5 from ..auth import auth 6 from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING 7 from ..utils import utils 8 9 10 class DaemonApiMixin(object): 11 def events(self, since=None, until=None, filters=None, decode=None): 12 if isinstance(since, datetime): 13 since = utils.datetime_to_timestamp(since) 14 15 if isinstance(until, datetime): 16 until = utils.datetime_to_timestamp(until) 17 18 if filters: 19 filters = utils.convert_filters(filters) 20 21 params = { 22 'since': since, 23 'until': until, 24 'filters': filters 25 } 26 27 return self._stream_helper( 28 self.get(self._url('/events'), params=params, stream=True), 29 decode=decode 30 ) 31 32 def info(self): 33 return self._result(self._get(self._url("/info")), True) 34 35 def login(self, username, password=None, email=None, registry=None, 36 reauth=False, insecure_registry=False, dockercfg_path=None): 37 if insecure_registry: 38 warnings.warn( 39 INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'), 40 DeprecationWarning 41 ) 42 43 # If we don't have any auth data so far, try reloading the config file 44 # one more time in case anything showed up in there. 45 # If dockercfg_path is passed check to see if the config file exists, 46 # if so load that config. 47 if dockercfg_path and os.path.exists(dockercfg_path): 48 self._auth_configs = auth.load_config(dockercfg_path) 49 elif not self._auth_configs: 50 self._auth_configs = auth.load_config() 51 52 registry = registry or auth.INDEX_URL 53 54 authcfg = auth.resolve_authconfig(self._auth_configs, registry) 55 # If we found an existing auth config for this registry and username 56 # combination, we can return it immediately unless reauth is requested. 57 if authcfg and authcfg.get('username', None) == username \ 58 and not reauth: 59 return authcfg 60 61 req_data = { 62 'username': username, 63 'password': password, 64 'email': email, 65 'serveraddress': registry, 66 } 67 68 response = self._post_json(self._url('/auth'), data=req_data) 69 if response.status_code == 200: 70 self._auth_configs[registry] = req_data 71 return self._result(response, json=True) 72 73 def ping(self): 74 return self._result(self._get(self._url('/_ping'))) 75 76 def version(self, api_version=True): 77 url = self._url("/version", versioned_api=api_version) 78 return self._result(self._get(url), json=True) 79 [end of docker/api/daemon.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/api/daemon.py b/docker/api/daemon.py --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -49,8 +49,6 @@ elif not self._auth_configs: self._auth_configs = auth.load_config() - registry = registry or auth.INDEX_URL - authcfg = auth.resolve_authconfig(self._auth_configs, registry) # If we found an existing auth config for this registry and username # combination, we can return it immediately unless reauth is requested. @@ -67,7 +65,7 @@ response = self._post_json(self._url('/auth'), data=req_data) if response.status_code == 200: - self._auth_configs[registry] = req_data + self._auth_configs[registry or auth.INDEX_NAME] = req_data return self._result(response, json=True) def ping(self):
{"golden_diff": "diff --git a/docker/api/daemon.py b/docker/api/daemon.py\n--- a/docker/api/daemon.py\n+++ b/docker/api/daemon.py\n@@ -49,8 +49,6 @@\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n \n- registry = registry or auth.INDEX_URL\n-\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n@@ -67,7 +65,7 @@\n \n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n- self._auth_configs[registry] = req_data\n+ self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n \n def ping(self):\n", "issue": "login failed with default registry\nI am using docker-py (1.8.0) and trying to using login API.\n\nIf I don't input `registry='https://index.docker.io/v1/'` .\nIt will raise exception as following:\n\n```\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"Unexpected status code [301] :\")\n```\n\nBut I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry.\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n registry = registry or auth.INDEX_URL\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", 
versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}]}
1,416
208
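The fix above stops substituting `auth.INDEX_URL` when no `registry` argument is given, so the daemon no longer receives the full index URL as `serveraddress` (which is consistent with the 301 reported in the issue), and a successful login is cached under `auth.INDEX_NAME` instead. A usage sketch of the call path the issue describes — the client class name differs between docker-py generations, and the credentials are placeholders:

```python
import docker

client = docker.Client()  # docker-py 1.x; later releases expose docker.APIClient()

# Default registry (Docker Hub): no registry argument needed after the fix.
client.login(username="someuser", password="secret")

# An explicit registry still works as before.
client.login(username="someuser", password="secret",
             registry="registry.example.com:5000")
```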
gh_patches_debug_29586
rasdani/github-patches
git_diff
blaze__blaze-1114
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dask test failure it seems `atop` is using an older dask API ``` ================================================================================= FAILURES ================================================================================== ____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________ blaze/compute/tests/test_dask.py:69: in test_compute result = compute(expr, dask_ns) ../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__ return func(*args, **kwargs) blaze/compute/core.py:470: in compute result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs) blaze/compute/core.py:164: in top_then_bottom_then_top_again_etc expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs) blaze/compute/core.py:371: in bottom_up_until_type_break **kwargs)} ../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__ return func(*args, **kwargs) blaze/compute/dask.py:40: in compute_broadcast *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) ../../../../code/py/dask/dask/array/core.py:1099: in atop numblocks = dict([(a.name, a.numblocks) for a, ind in arginds]) ``` </issue> <code> [start of blaze/compute/dask.py] 1 from __future__ import absolute_import, division, print_function 2 3 from numbers import Number 4 from toolz import concat, first, curry, compose 5 from datashape import DataShape 6 7 from blaze import compute, ndim 8 from blaze.dispatch import dispatch 9 from blaze.compute.core import compute_up, optimize 10 from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot, 11 Expr, Slice, Broadcast) 12 from blaze.expr.split import split 13 14 from dask.array.core import (_concatenate2, Array, atop, names, transpose, 15 tensordot) 16 17 18 def compute_it(expr, leaves, *data, **kwargs): 19 kwargs.pop('scope') 20 return compute(expr, dict(zip(leaves, data)), **kwargs) 21 22 23 def elemwise_array(expr, *data, **kwargs): 24 leaves = expr._inputs 25 expr_inds = tuple(range(ndim(expr)))[::-1] 26 return atop(curry(compute_it, expr, leaves, **kwargs), 27 next(names), expr_inds, 28 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) 29 30 31 try: 32 from blaze.compute.numba import (get_numba_ufunc, broadcast_collect, 33 Broadcastable) 34 35 def compute_broadcast(expr, *data, **kwargs): 36 expr_inds = tuple(range(ndim(expr)))[::-1] 37 func = get_numba_ufunc(expr) 38 return atop(func, 39 next(names), expr_inds, 40 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) 41 42 def optimize_array(expr, *data): 43 return broadcast_collect(expr, Broadcastable=Broadcastable, 44 WantToBroadcast=Broadcastable) 45 46 for i in range(5): 47 compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast) 48 optimize.register(Expr, *([(Array, Number)] * i))(optimize_array) 49 50 except ImportError: 51 pass 52 53 54 for i in range(5): 55 compute_up.register(ElemWise, *([Array] * i))(elemwise_array) 56 57 58 @dispatch(Reduction, Array) 59 def compute_up(expr, data, **kwargs): 60 leaf = expr._leaves()[0] 61 chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) + 62 (leaf.dshape.measure,)))) 63 (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr, 64 chunk=chunk) 65 66 inds = tuple(range(ndim(leaf))) 67 tmp = 
atop(curry(compute_it, chunk_expr, [chunk], **kwargs), 68 next(names), inds, 69 data, inds) 70 71 return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs), 72 curry(_concatenate2, axes=expr.axis)), 73 next(names), tuple(i for i in inds if i not in expr.axis), 74 tmp, inds) 75 76 77 @dispatch(Transpose, Array) 78 def compute_up(expr, data, **kwargs): 79 return transpose(data, expr.axes) 80 81 82 @dispatch(TensorDot, Array, Array) 83 def compute_up(expr, lhs, rhs, **kwargs): 84 return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes)) 85 86 87 @dispatch(Slice, Array) 88 def compute_up(expr, data, **kwargs): 89 return data[expr.index] 90 [end of blaze/compute/dask.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py --- a/blaze/compute/dask.py +++ b/blaze/compute/dask.py @@ -24,7 +24,7 @@ leaves = expr._inputs expr_inds = tuple(range(ndim(expr)))[::-1] return atop(curry(compute_it, expr, leaves, **kwargs), - next(names), expr_inds, + expr_inds, *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) @@ -36,7 +36,7 @@ expr_inds = tuple(range(ndim(expr)))[::-1] func = get_numba_ufunc(expr) return atop(func, - next(names), expr_inds, + expr_inds, *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data)) def optimize_array(expr, *data): @@ -64,13 +64,12 @@ chunk=chunk) inds = tuple(range(ndim(leaf))) - tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), - next(names), inds, - data, inds) + tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data, + inds) return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs), curry(_concatenate2, axes=expr.axis)), - next(names), tuple(i for i in inds if i not in expr.axis), + tuple(i for i in inds if i not in expr.axis), tmp, inds)
{"golden_diff": "diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py\n--- a/blaze/compute/dask.py\n+++ b/blaze/compute/dask.py\n@@ -24,7 +24,7 @@\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n \n@@ -36,7 +36,7 @@\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n def optimize_array(expr, *data):\n@@ -64,13 +64,12 @@\n chunk=chunk)\n \n inds = tuple(range(ndim(leaf)))\n- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n- next(names), inds,\n- data, inds)\n+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n+ inds)\n \n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n- next(names), tuple(i for i in inds if i not in expr.axis),\n+ tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n", "issue": "dask test failure\nit seems `atop` is using an older dask API\n\n```\n================================================================================= FAILURES ==================================================================================\n____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________\nblaze/compute/tests/test_dask.py:69: in test_compute\n result = compute(expr, dask_ns)\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/core.py:470: in compute\n result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)\nblaze/compute/core.py:164: in top_then_bottom_then_top_again_etc\n expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)\nblaze/compute/core.py:371: in bottom_up_until_type_break\n **kwargs)}\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/dask.py:40: in compute_broadcast\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n../../../../code/py/dask/dask/array/core.py:1099: in atop\n numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n 
Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n next(names), inds,\n data, inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n next(names), tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}]}
1,754
370
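The patch is purely a signature chase: newer dask dropped the explicit output-name argument from `atop`, which now generates a token for the result itself, so each `next(names)` positional is removed. The call shape before and after, excerpted from the hunk above:

```python
# Older dask API (as the unpatched code assumed): a name drawn from `names`
# was passed positionally before the output index tuple.
atop(curry(compute_it, expr, leaves, **kwargs),
     next(names), expr_inds,
     *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))

# Newer dask API (as patched): only the output index tuple follows the function;
# dask names the resulting array itself.
atop(curry(compute_it, expr, leaves, **kwargs),
     expr_inds,
     *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
```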
gh_patches_debug_42806
rasdani/github-patches
git_diff
saleor__saleor-2738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create menu items in menuCreate mutation ### What I'm trying to achieve It would be very handy if API user would be able to create the first level of menu items at the menu creation moment. This way we could reduce the number of database queries needed to create a menu and speed up the whole process. ### Describe a proposed solution Change `MenuInput` signature to ``` type MenuInput { name: String! items: [MenuItemCreate!] } ``` </issue> <code> [start of saleor/graphql/menu/mutations.py] 1 import graphene 2 from graphql_jwt.decorators import permission_required 3 4 from ...menu import models 5 from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation 6 from .types import Menu 7 8 9 class MenuInput(graphene.InputObjectType): 10 name = graphene.String(description='Name of the menu.') 11 12 13 class MenuItemInput(graphene.InputObjectType): 14 menu = graphene.ID( 15 description='Menu to which item belongs to.', name='menu') 16 name = graphene.String(description='Name of the menu item.') 17 parent = graphene.ID( 18 description=''' 19 ID of the parent menu. If empty, menu will be top level 20 menu.''', 21 name='parent') 22 url = graphene.String(description='URL of the pointed item.') 23 category = graphene.ID( 24 description='Category to which item points.', name='category') 25 collection = graphene.ID( 26 description='Collection to which item points.', name='collection') 27 page = graphene.ID( 28 description='Page to which item points.', name='page') 29 30 31 class MenuCreate(ModelMutation): 32 class Arguments: 33 input = MenuInput( 34 required=True, 35 description='Fields required to create a menu.') 36 37 class Meta: 38 description = 'Creates a new Menu' 39 model = models.Menu 40 41 @classmethod 42 def user_is_allowed(cls, user, input): 43 return user.has_perm('menu.manage_menus') 44 45 46 class MenuUpdate(ModelMutation): 47 class Arguments: 48 id = graphene.ID( 49 required=True, description='ID of a menu to update.') 50 input = MenuInput( 51 required=True, 52 description='Fields required to update a menu.') 53 54 class Meta: 55 description = 'Updates a menu.' 56 model = models.Menu 57 58 @classmethod 59 def user_is_allowed(cls, user, input): 60 return user.has_perm('menu.manage_menus') 61 62 63 class MenuDelete(ModelDeleteMutation): 64 class Arguments: 65 id = graphene.ID( 66 required=True, description='ID of a menu to delete.') 67 68 class Meta: 69 description = 'Deletes a menu.' 70 model = models.Menu 71 72 @classmethod 73 def user_is_allowed(cls, user, input): 74 return user.has_perm('menu.manage_menus') 75 76 77 class MenuItemCreate(ModelMutation): 78 class Arguments: 79 input = MenuItemInput( 80 required=True, 81 description="""Fields required to update a menu item. 
82 Only one of 'url', 'category', 'page', 'collection' is allowed 83 per item""") 84 85 class Meta: 86 description = 'Creates a new Menu' 87 model = models.MenuItem 88 89 @classmethod 90 def user_is_allowed(cls, user, input): 91 return user.has_perm('menu.manage_menus') 92 93 @classmethod 94 def clean_input(cls, info, instance, input, errors): 95 cleaned_input = super().clean_input(info, instance, input, errors) 96 items = [ 97 cleaned_input.get('page'), cleaned_input.get('collection'), 98 cleaned_input.get('url'), cleaned_input.get('category')] 99 items = [item for item in items if item is not None] 100 if len(items) > 1: 101 cls.add_error( 102 errors=errors, 103 field='items', message='More than one item provided.') 104 return cleaned_input 105 106 107 class MenuItemUpdate(MenuItemCreate): 108 class Arguments: 109 id = graphene.ID( 110 required=True, description='ID of a menu item to update.') 111 input = MenuItemInput( 112 required=True, 113 description="""Fields required to update a menu item. 114 Only one of 'url', 'category', 'page', 'collection' is allowed 115 per item""") 116 117 class Meta: 118 description = 'Updates a menu item.' 119 model = models.MenuItem 120 121 @classmethod 122 def user_is_allowed(cls, user, input): 123 return user.has_perm('menu.manage_menus') 124 125 @classmethod 126 def construct_instance(cls, instance, cleaned_data): 127 # Only one item can be assigned per menu item 128 instance.page = None 129 instance.collection = None 130 instance.category = None 131 instance.url = None 132 return super().construct_instance(instance, cleaned_data) 133 134 135 class MenuItemDelete(ModelDeleteMutation): 136 class Arguments: 137 id = graphene.ID( 138 required=True, description='ID of a menu item to delete.') 139 140 class Meta: 141 description = 'Deletes a menu item.' 142 model = models.MenuItem 143 144 @classmethod 145 def user_is_allowed(cls, user, input): 146 return user.has_perm('menu.manage_menus') 147 148 149 class NavigationType(graphene.Enum): 150 MAIN = 'main' 151 SECONDARY = 'secondary' 152 153 @property 154 def description(self): 155 if self == NavigationType.MAIN: 156 return 'Main storefront\'s navigation.' 157 return 'Secondary storefront\'s navigation.' 158 159 160 class AssignNavigation(BaseMutation): 161 menu = graphene.Field(Menu, description='Assigned navigation menu.') 162 163 class Arguments: 164 menu = graphene.ID( 165 description='ID of the menu.') 166 navigation_type = NavigationType( 167 description='Type of the navigation bar to assign the menu to.', 168 required=True) 169 170 class Meta: 171 description = 'Assigns storefront\'s navigation menus.' 
172 173 @classmethod 174 @permission_required(['menu.manage_menus', 'site.manage_settings']) 175 def mutate(cls, root, info, navigation_type, menu=None): 176 errors = [] 177 site_settings = info.context.site.settings 178 if menu is not None: 179 menu = cls.get_node_or_error( 180 info, menu, errors=errors, field='menu') 181 if not errors: 182 if navigation_type == NavigationType.MAIN: 183 site_settings.top_menu = menu 184 site_settings.save(update_fields=['top_menu']) 185 elif navigation_type == NavigationType.SECONDARY: 186 site_settings.bottom_menu = menu 187 site_settings.save(update_fields=['bottom_menu']) 188 else: 189 raise AssertionError( 190 'Unknown navigation type: %s' % navigation_type) 191 return AssignNavigation(menu=menu, errors=errors) 192 [end of saleor/graphql/menu/mutations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py --- a/saleor/graphql/menu/mutations.py +++ b/saleor/graphql/menu/mutations.py @@ -3,22 +3,13 @@ from ...menu import models from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation +from ..product.types import Category, Collection +from ..page.types import Page from .types import Menu -class MenuInput(graphene.InputObjectType): - name = graphene.String(description='Name of the menu.') - - class MenuItemInput(graphene.InputObjectType): - menu = graphene.ID( - description='Menu to which item belongs to.', name='menu') name = graphene.String(description='Name of the menu item.') - parent = graphene.ID( - description=''' - ID of the parent menu. If empty, menu will be top level - menu.''', - name='parent') url = graphene.String(description='URL of the pointed item.') category = graphene.ID( description='Category to which item points.', name='category') @@ -28,9 +19,28 @@ description='Page to which item points.', name='page') +class MenuItemCreateInput(MenuItemInput): + menu = graphene.ID( + description='Menu to which item belongs to.', name='menu') + parent = graphene.ID( + description=''' + ID of the parent menu. If empty, menu will be top level + menu.''', + name='parent') + + +class MenuInput(graphene.InputObjectType): + name = graphene.String(description='Name of the menu.') + + +class MenuCreateInput(MenuInput): + items = graphene.List( + MenuItemInput, description='List of menu items.') + + class MenuCreate(ModelMutation): class Arguments: - input = MenuInput( + input = MenuCreateInput( required=True, description='Fields required to create a menu.') @@ -42,6 +52,45 @@ def user_is_allowed(cls, user, input): return user.has_perm('menu.manage_menus') + @classmethod + def clean_input(cls, info, instance, input, errors): + cleaned_input = super().clean_input(info, instance, input, errors) + items = [] + for item in cleaned_input.get('items', []): + category = item.get('category') + collection = item.get('collection') + page = item.get('page') + url = item.get('url') + if len([i for i in [category, collection, page, url] if i]) > 1: + cls.add_error( + errors, 'items', 'More than one item provided.') + else: + if category: + category = cls.get_node_or_error( + info, category, errors, 'items', only_type=Category) + item['category'] = category + elif collection: + collection = cls.get_node_or_error( + info, collection, errors, 'items', + only_type=Collection) + item['collection'] = collection + elif page: + page = cls.get_node_or_error( + info, page, errors, 'items', only_type=Page) + item['page'] = page + elif not url: + cls.add_error(errors, 'items', 'No menu item provided.') + items.append(item) + cleaned_input['items'] = items + return cleaned_input + + @classmethod + def _save_m2m(cls, info, instance, cleaned_data): + super()._save_m2m(info, instance, cleaned_data) + items = cleaned_data.get('items', []) + for item in items: + instance.items.create(**item) + class MenuUpdate(ModelMutation): class Arguments: @@ -76,7 +125,7 @@ class MenuItemCreate(ModelMutation): class Arguments: - input = MenuItemInput( + input = MenuItemCreateInput( required=True, description="""Fields required to update a menu item. Only one of 'url', 'category', 'page', 'collection' is allowed
{"golden_diff": "diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py\n--- a/saleor/graphql/menu/mutations.py\n+++ b/saleor/graphql/menu/mutations.py\n@@ -3,22 +3,13 @@\n \n from ...menu import models\n from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\n+from ..product.types import Category, Collection\n+from ..page.types import Page\n from .types import Menu\n \n \n-class MenuInput(graphene.InputObjectType):\n- name = graphene.String(description='Name of the menu.')\n-\n-\n class MenuItemInput(graphene.InputObjectType):\n- menu = graphene.ID(\n- description='Menu to which item belongs to.', name='menu')\n name = graphene.String(description='Name of the menu item.')\n- parent = graphene.ID(\n- description='''\n- ID of the parent menu. If empty, menu will be top level\n- menu.''',\n- name='parent')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n@@ -28,9 +19,28 @@\n description='Page to which item points.', name='page')\n \n \n+class MenuItemCreateInput(MenuItemInput):\n+ menu = graphene.ID(\n+ description='Menu to which item belongs to.', name='menu')\n+ parent = graphene.ID(\n+ description='''\n+ ID of the parent menu. If empty, menu will be top level\n+ menu.''',\n+ name='parent')\n+\n+\n+class MenuInput(graphene.InputObjectType):\n+ name = graphene.String(description='Name of the menu.')\n+\n+\n+class MenuCreateInput(MenuInput):\n+ items = graphene.List(\n+ MenuItemInput, description='List of menu items.')\n+\n+\n class MenuCreate(ModelMutation):\n class Arguments:\n- input = MenuInput(\n+ input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n \n@@ -42,6 +52,45 @@\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n \n+ @classmethod\n+ def clean_input(cls, info, instance, input, errors):\n+ cleaned_input = super().clean_input(info, instance, input, errors)\n+ items = []\n+ for item in cleaned_input.get('items', []):\n+ category = item.get('category')\n+ collection = item.get('collection')\n+ page = item.get('page')\n+ url = item.get('url')\n+ if len([i for i in [category, collection, page, url] if i]) > 1:\n+ cls.add_error(\n+ errors, 'items', 'More than one item provided.')\n+ else:\n+ if category:\n+ category = cls.get_node_or_error(\n+ info, category, errors, 'items', only_type=Category)\n+ item['category'] = category\n+ elif collection:\n+ collection = cls.get_node_or_error(\n+ info, collection, errors, 'items',\n+ only_type=Collection)\n+ item['collection'] = collection\n+ elif page:\n+ page = cls.get_node_or_error(\n+ info, page, errors, 'items', only_type=Page)\n+ item['page'] = page\n+ elif not url:\n+ cls.add_error(errors, 'items', 'No menu item provided.')\n+ items.append(item)\n+ cleaned_input['items'] = items\n+ return cleaned_input\n+\n+ @classmethod\n+ def _save_m2m(cls, info, instance, cleaned_data):\n+ super()._save_m2m(info, instance, cleaned_data)\n+ items = cleaned_data.get('items', [])\n+ for item in items:\n+ instance.items.create(**item)\n+\n \n class MenuUpdate(ModelMutation):\n class Arguments:\n@@ -76,7 +125,7 @@\n \n class MenuItemCreate(ModelMutation):\n class Arguments:\n- input = MenuItemInput(\n+ input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n", "issue": "Create menu items in menuCreate mutation\n### 
What I'm trying to achieve\r\nIt would be very handy if API user would be able to create the first level of menu items at the menu creation moment. This way we could reduce the number of database queries needed to create a menu and speed up the whole process. \r\n\r\n### Describe a proposed solution\r\nChange `MenuInput` signature to\r\n```\r\ntype MenuInput {\r\n name: String!\r\n items: [MenuItemCreate!]\r\n}\r\n```\r\n\n", "before_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom .types import Menu\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuItemInput(graphene.InputObjectType):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu')\n name = graphene.String(description='Name of the menu item.')\n parent = graphene.ID(\n description='''\n ID of the parent menu. If empty, menu will be top level\n menu.''',\n name='parent')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n 
description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}]}
2,361
924
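After this change `menuCreate` accepts a `MenuCreateInput` whose `items` list is validated in `clean_input` (each item must point at exactly one of `url`, `category`, `collection` or `page`) and persisted in `_save_m2m`. A sketch of the mutation variables such a call could take — the values are invented, and the IDs stand in for the usual Relay-style global IDs:

```python
# Illustrative GraphQL variables for menuCreate after the patch.
variables = {
    "input": {
        "name": "Footer",
        "items": [
            {"name": "About us", "url": "/about/"},
            {"name": "Shoes", "category": "Q2F0ZWdvcnk6MQ=="},  # made-up global ID
        ],
    }
}
```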
gh_patches_debug_24970
rasdani/github-patches
git_diff
WeblateOrg__weblate-11333
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Glossary check not working on spaceless languages ### Describe the issue When having `check-glossary` enabled, the check always fails on spaceless languages, if the source string has a word in it that is in the glossary. ### I already tried - [X] I've read and searched [the documentation](https://docs.weblate.org/). - [X] I've searched for similar filed issues in this repository. ### Steps to reproduce the behavior 1. Create glossary entry that actually exists in a source string 2. Translate the source string with the glossary entry to Japanese (or Korean or similar) 3. Enable `check-glossary` 4. Notice a false positive on the check ### Expected behavior _No response_ ### Screenshots ![Bildschirmfoto vom 2024-03-30 17-12-24](https://github.com/WeblateOrg/weblate/assets/11144627/d906ae16-7c70-42f7-943a-ac9f03b0517d) ![Bildschirmfoto vom 2024-03-30 17-12-20](https://github.com/WeblateOrg/weblate/assets/11144627/65995849-2de4-4df8-a05c-230964d257cc) ### Exception traceback _No response_ ### How do you run Weblate? Docker container ### Weblate versions * Weblate: 5.4.3 * Django: 4.2.11 * siphashc: 2.4.1 * translate-toolkit: 3.12.2 * lxml: 5.1.0 * pillow: 10.2.0 * nh3: 0.2.17 * python-dateutil: 2.9.0.post0 * social-auth-core: 4.5.3 * social-auth-app-django: 5.4.0 * django-crispy-forms: 2.1 * oauthlib: 3.2.2 * django-compressor: 4.4 * djangorestframework: 3.14.0 * django-filter: 23.5 * django-appconf: 1.0.6 * user-agents: 2.2.0 * filelock: 3.13.3 * rapidfuzz: 3.7.0 * openpyxl: 3.1.2 * celery: 5.3.6 * django-celery-beat: 2.5.0 * kombu: 5.3.5 * translation-finder: 2.16 * weblate-language-data: 2024.3 * html2text: 2020.1.16 * pycairo: 1.26.0 * PyGObject: 3.48.1 * diff-match-patch: 20230430 * requests: 2.31.0 * django-redis: 5.4.0 * hiredis: 2.3.2 * sentry-sdk: 1.43.0 * Cython: 3.0.9 * misaka: 2.1.1 * GitPython: 3.1.42 * borgbackup: 1.2.7 * pyparsing: 3.1.2 * ahocorasick_rs: 0.22.0 * python-redis-lock: 4.0.0 * charset-normalizer: 3.3.2 * Python: 3.12.2 * Git: 2.39.2 * psycopg: 3.1.18 * psycopg-binary: 3.1.18 * phply: 1.2.6 * ruamel.yaml: 0.18.6 * tesserocr: 2.6.2 * boto3: 1.34.71 * zeep: 4.2.1 * aeidon: 1.13 * iniparse: 0.5 * mysqlclient: 2.2.4 * Mercurial: 6.7.1 * git-svn: 2.39.2 * git-review: 2.4.0 * PostgreSQL server: 15.2 * Database backends: django.db.backends.postgresql * PostgreSQL implementation: psycopg3 (binary) * Cache backends: default:RedisCache, avatar:FileBasedCache * Email setup: django.core.mail.backends.smtp.EmailBackend: mail.your-server.de * OS encoding: filesystem=utf-8, default=utf-8 * Celery: redis://cache:6379/1, redis://cache:6379/1, regular * Platform: Linux 5.10.0-28-amd64 (x86_64) ### Weblate deploy checks ```shell System check identified some issues: INFOS: ?: (weblate.I028) Backups are not configured, it is highly recommended for production use HINT: https://docs.weblate.org/en/weblate-5.4.3/admin/backup.html System check identified 1 issue (1 silenced). 
``` ### Additional context _No response_ </issue> <code> [start of weblate/checks/glossary.py] 1 # Copyright © Michal Čihař <[email protected]> 2 # 3 # SPDX-License-Identifier: GPL-3.0-or-later 4 5 import re 6 7 from django.utils.html import escape, format_html, format_html_join 8 from django.utils.translation import gettext, gettext_lazy 9 10 from weblate.checks.base import TargetCheck 11 12 13 class GlossaryCheck(TargetCheck): 14 default_disabled = True 15 check_id = "check_glossary" 16 name = gettext_lazy("Does not follow glossary") 17 description = gettext_lazy( 18 "The translation does not follow terms defined in a glossary." 19 ) 20 21 def check_single(self, source, target, unit): 22 from weblate.glossary.models import get_glossary_terms 23 24 forbidden = set() 25 mismatched = set() 26 matched = set() 27 for term in get_glossary_terms(unit): 28 term_source = term.source 29 flags = term.all_flags 30 expected = term_source if "read-only" in flags else term.target 31 if "forbidden" in flags: 32 if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): 33 forbidden.add(term_source) 34 else: 35 if term_source in matched: 36 continue 37 if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): 38 mismatched.discard(term_source) 39 matched.add(term_source) 40 else: 41 mismatched.add(term_source) 42 43 return forbidden | mismatched 44 45 def get_description(self, check_obj): 46 unit = check_obj.unit 47 sources = unit.get_source_plurals() 48 targets = unit.get_target_plurals() 49 source = sources[0] 50 results = set() 51 # Check singular 52 result = self.check_single(source, targets[0], unit) 53 if result: 54 results.update(result) 55 # Do we have more to check? 56 if len(sources) > 1: 57 source = sources[1] 58 # Check plurals against plural from source 59 for target in targets[1:]: 60 result = self.check_single(source, target, unit) 61 if result: 62 results.update(result) 63 64 if not results: 65 return super().get_description(check_obj) 66 67 return format_html( 68 escape( 69 gettext("Following terms are not translated according to glossary: {}") 70 ), 71 format_html_join(", ", "{}", ((term,) for term in sorted(results))), 72 ) 73 [end of weblate/checks/glossary.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/checks/glossary.py b/weblate/checks/glossary.py --- a/weblate/checks/glossary.py +++ b/weblate/checks/glossary.py @@ -24,17 +24,22 @@ forbidden = set() mismatched = set() matched = set() + boundary = r"\b" if unit.translation.language.uses_whitespace() else "" for term in get_glossary_terms(unit): term_source = term.source flags = term.all_flags expected = term_source if "read-only" in flags else term.target if "forbidden" in flags: - if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): + if re.search( + rf"{boundary}{re.escape(expected)}{boundary}", target, re.IGNORECASE + ): forbidden.add(term_source) else: if term_source in matched: continue - if re.search(rf"\b{re.escape(expected)}\b", target, re.IGNORECASE): + if re.search( + rf"{boundary}{re.escape(expected)}{boundary}", target, re.IGNORECASE + ): mismatched.discard(term_source) matched.add(term_source) else:
{"golden_diff": "diff --git a/weblate/checks/glossary.py b/weblate/checks/glossary.py\n--- a/weblate/checks/glossary.py\n+++ b/weblate/checks/glossary.py\n@@ -24,17 +24,22 @@\n forbidden = set()\n mismatched = set()\n matched = set()\n+ boundary = r\"\\b\" if unit.translation.language.uses_whitespace() else \"\"\n for term in get_glossary_terms(unit):\n term_source = term.source\n flags = term.all_flags\n expected = term_source if \"read-only\" in flags else term.target\n if \"forbidden\" in flags:\n- if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n+ if re.search(\n+ rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n+ ):\n forbidden.add(term_source)\n else:\n if term_source in matched:\n continue\n- if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n+ if re.search(\n+ rf\"{boundary}{re.escape(expected)}{boundary}\", target, re.IGNORECASE\n+ ):\n mismatched.discard(term_source)\n matched.add(term_source)\n else:\n", "issue": "Glossary check not working on spaceless languages\n### Describe the issue\n\nWhen having `check-glossary` enabled, the check always fails on spaceless languages, if the source string has a word in it that is in the glossary.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar filed issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Create glossary entry that actually exists in a source string\r\n2. Translate the source string with the glossary entry to Japanese (or Korean or similar)\r\n3. Enable `check-glossary`\r\n4. Notice a false positive on the check\n\n### Expected behavior\n\n_No response_\n\n### Screenshots\n\n![Bildschirmfoto vom 2024-03-30 17-12-24](https://github.com/WeblateOrg/weblate/assets/11144627/d906ae16-7c70-42f7-943a-ac9f03b0517d)\r\n![Bildschirmfoto vom 2024-03-30 17-12-20](https://github.com/WeblateOrg/weblate/assets/11144627/65995849-2de4-4df8-a05c-230964d257cc)\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nDocker container\n\n### Weblate versions\n\n * Weblate: 5.4.3\r\n * Django: 4.2.11\r\n * siphashc: 2.4.1\r\n * translate-toolkit: 3.12.2\r\n * lxml: 5.1.0\r\n * pillow: 10.2.0\r\n * nh3: 0.2.17\r\n * python-dateutil: 2.9.0.post0\r\n * social-auth-core: 4.5.3\r\n * social-auth-app-django: 5.4.0\r\n * django-crispy-forms: 2.1\r\n * oauthlib: 3.2.2\r\n * django-compressor: 4.4\r\n * djangorestframework: 3.14.0\r\n * django-filter: 23.5\r\n * django-appconf: 1.0.6\r\n * user-agents: 2.2.0\r\n * filelock: 3.13.3\r\n * rapidfuzz: 3.7.0\r\n * openpyxl: 3.1.2\r\n * celery: 5.3.6\r\n * django-celery-beat: 2.5.0\r\n * kombu: 5.3.5\r\n * translation-finder: 2.16\r\n * weblate-language-data: 2024.3\r\n * html2text: 2020.1.16\r\n * pycairo: 1.26.0\r\n * PyGObject: 3.48.1\r\n * diff-match-patch: 20230430\r\n * requests: 2.31.0\r\n * django-redis: 5.4.0\r\n * hiredis: 2.3.2\r\n * sentry-sdk: 1.43.0\r\n * Cython: 3.0.9\r\n * misaka: 2.1.1\r\n * GitPython: 3.1.42\r\n * borgbackup: 1.2.7\r\n * pyparsing: 3.1.2\r\n * ahocorasick_rs: 0.22.0\r\n * python-redis-lock: 4.0.0\r\n * charset-normalizer: 3.3.2\r\n * Python: 3.12.2\r\n * Git: 2.39.2\r\n * psycopg: 3.1.18\r\n * psycopg-binary: 3.1.18\r\n * phply: 1.2.6\r\n * ruamel.yaml: 0.18.6\r\n * tesserocr: 2.6.2\r\n * boto3: 1.34.71\r\n * zeep: 4.2.1\r\n * aeidon: 1.13\r\n * iniparse: 0.5\r\n * mysqlclient: 2.2.4\r\n * Mercurial: 6.7.1\r\n * git-svn: 2.39.2\r\n * git-review: 2.4.0\r\n * PostgreSQL server: 
15.2\r\n * Database backends: django.db.backends.postgresql\r\n * PostgreSQL implementation: psycopg3 (binary)\r\n * Cache backends: default:RedisCache, avatar:FileBasedCache\r\n * Email setup: django.core.mail.backends.smtp.EmailBackend: mail.your-server.de\r\n * OS encoding: filesystem=utf-8, default=utf-8\r\n * Celery: redis://cache:6379/1, redis://cache:6379/1, regular\r\n * Platform: Linux 5.10.0-28-amd64 (x86_64)\n\n### Weblate deploy checks\n\n```shell\nSystem check identified some issues:\r\n\r\nINFOS:\r\n?: (weblate.I028) Backups are not configured, it is highly recommended for production use\r\n\tHINT: https://docs.weblate.org/en/weblate-5.4.3/admin/backup.html\r\n\r\nSystem check identified 1 issue (1 silenced).\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport re\n\nfrom django.utils.html import escape, format_html, format_html_join\nfrom django.utils.translation import gettext, gettext_lazy\n\nfrom weblate.checks.base import TargetCheck\n\n\nclass GlossaryCheck(TargetCheck):\n default_disabled = True\n check_id = \"check_glossary\"\n name = gettext_lazy(\"Does not follow glossary\")\n description = gettext_lazy(\n \"The translation does not follow terms defined in a glossary.\"\n )\n\n def check_single(self, source, target, unit):\n from weblate.glossary.models import get_glossary_terms\n\n forbidden = set()\n mismatched = set()\n matched = set()\n for term in get_glossary_terms(unit):\n term_source = term.source\n flags = term.all_flags\n expected = term_source if \"read-only\" in flags else term.target\n if \"forbidden\" in flags:\n if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n forbidden.add(term_source)\n else:\n if term_source in matched:\n continue\n if re.search(rf\"\\b{re.escape(expected)}\\b\", target, re.IGNORECASE):\n mismatched.discard(term_source)\n matched.add(term_source)\n else:\n mismatched.add(term_source)\n\n return forbidden | mismatched\n\n def get_description(self, check_obj):\n unit = check_obj.unit\n sources = unit.get_source_plurals()\n targets = unit.get_target_plurals()\n source = sources[0]\n results = set()\n # Check singular\n result = self.check_single(source, targets[0], unit)\n if result:\n results.update(result)\n # Do we have more to check?\n if len(sources) > 1:\n source = sources[1]\n # Check plurals against plural from source\n for target in targets[1:]:\n result = self.check_single(source, target, unit)\n if result:\n results.update(result)\n\n if not results:\n return super().get_description(check_obj)\n\n return format_html(\n escape(\n gettext(\"Following terms are not translated according to glossary: {}\")\n ),\n format_html_join(\", \", \"{}\", ((term,) for term in sorted(results))),\n )\n", "path": "weblate/checks/glossary.py"}]}
2,461
284
gh_patches_debug_14632
rasdani/github-patches
git_diff
PyGithub__PyGithub-1053
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dismiss a PR review? Am I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)? </issue> <code> [start of github/PullRequestReview.py] 1 # -*- coding: utf-8 -*- 2 3 ############################ Copyrights and license ############################ 4 # # 5 # Copyright 2017 Aaron Levine <[email protected]> # 6 # Copyright 2017 Mike Miller <[email protected]> # 7 # Copyright 2018 Darragh Bailey <[email protected]> # 8 # Copyright 2018 Wan Liuyang <[email protected]> # 9 # Copyright 2018 sfdye <[email protected]> # 10 # # 11 # This file is part of PyGithub. # 12 # http://pygithub.readthedocs.io/ # 13 # # 14 # PyGithub is free software: you can redistribute it and/or modify it under # 15 # the terms of the GNU Lesser General Public License as published by the Free # 16 # Software Foundation, either version 3 of the License, or (at your option) # 17 # any later version. # 18 # # 19 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # 20 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # 21 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # 22 # details. # 23 # # 24 # You should have received a copy of the GNU Lesser General Public License # 25 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # 26 # # 27 ################################################################################ 28 29 import github.GithubObject 30 31 import github.NamedUser 32 33 34 class PullRequestReview(github.GithubObject.CompletableGithubObject): 35 """ 36 This class represents PullRequestReviews. 
The reference can be found here https://developer.github.com/v3/pulls/reviews/ 37 """ 38 39 def __repr__(self): 40 return self.get__repr__({"id": self._id.value, "user": self._user.value}) 41 42 @property 43 def id(self): 44 """ 45 :type: integer 46 """ 47 self._completeIfNotSet(self._id) 48 return self._id.value 49 50 @property 51 def user(self): 52 """ 53 :type: :class:`github.NamedUser.NamedUser` 54 """ 55 self._completeIfNotSet(self._user) 56 return self._user.value 57 58 @property 59 def body(self): 60 """ 61 :type: string 62 """ 63 self._completeIfNotSet(self._body) 64 return self._body.value 65 66 @property 67 def commit_id(self): 68 """ 69 :type: string 70 """ 71 self._completeIfNotSet(self._commit_id) 72 return self._commit_id.value 73 74 @property 75 def state(self): 76 """ 77 :type: string 78 """ 79 self._completeIfNotSet(self._state) 80 return self._state.value 81 82 @property 83 def url(self): 84 """ 85 :type: string 86 """ 87 self._completeIfNotSet(self._url) 88 return self._url.value 89 90 @property 91 def html_url(self): 92 """ 93 :type: string 94 """ 95 self._completeIfNotSet(self._html_url) 96 return self._html_url.value 97 98 @property 99 def pull_request_url(self): 100 """ 101 :type: string 102 """ 103 self._completeIfNotSet(self._pull_request_url) 104 return self._pull_request_url.value 105 106 @property 107 def submitted_at(self): 108 """ 109 :type: datetime.datetime 110 """ 111 self._completeIfNotSet(self._submitted_at) 112 return self._submitted_at.value 113 114 def _initAttributes(self): 115 self._id = github.GithubObject.NotSet 116 self._user = github.GithubObject.NotSet 117 self._body = github.GithubObject.NotSet 118 self._commit_id = github.GithubObject.NotSet 119 self._state = github.GithubObject.NotSet 120 self._url = github.GithubObject.NotSet 121 self._html_url = github.GithubObject.NotSet 122 self._pull_request_url = github.GithubObject.NotSet 123 self._submitted_at = github.GithubObject.NotSet 124 125 def _useAttributes(self, attributes): 126 if "id" in attributes: # pragma no branch 127 self._id = self._makeIntAttribute(attributes["id"]) 128 if "user" in attributes: # pragma no branch 129 self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"]) 130 if "body" in attributes: # pragma no branch 131 self._body = self._makeStringAttribute(attributes["body"]) 132 if "commit_id" in attributes: # pragma no branch 133 self._commit_id = self._makeStringAttribute(attributes["commit_id"]) 134 if "state" in attributes: # pragma no branch 135 self._state = self._makeStringAttribute(attributes["state"]) 136 if "url" in attributes: # pragma no branch 137 self._url = self._makeStringAttribute(attributes["url"]) 138 if "html_url" in attributes: # pragma no branch 139 self._html_url = self._makeStringAttribute(attributes["html_url"]) 140 if "pull_request_url" in attributes: # pragma no branch 141 self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"]) 142 if "submitted_at" in attributes: # pragma no branch 143 self._submitted_at = self._makeDatetimeAttribute(attributes["submitted_at"]) 144 [end of github/PullRequestReview.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py --- a/github/PullRequestReview.py +++ b/github/PullRequestReview.py @@ -111,6 +111,19 @@ self._completeIfNotSet(self._submitted_at) return self._submitted_at.value + def dismiss(self, message): + """ + :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_ + :rtype: None + """ + assert isinstance(message, (str, unicode)), message + post_parameters = {'message': message} + headers, data = self._requester.requestJsonAndCheck( + "PUT", + self.pull_request_url + "/reviews/%s/dismissals" % self.id, + input=post_parameters + ) + def _initAttributes(self): self._id = github.GithubObject.NotSet self._user = github.GithubObject.NotSet
{"golden_diff": "diff --git a/github/PullRequestReview.py b/github/PullRequestReview.py\n--- a/github/PullRequestReview.py\n+++ b/github/PullRequestReview.py\n@@ -111,6 +111,19 @@\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n \n+ def dismiss(self, message):\n+ \"\"\"\n+ :calls: `PUT /repos/:owner/:repo/pulls/:number/reviews/:review_id/dismissals <https://developer.github.com/v3/pulls/reviews/>`_\n+ :rtype: None\n+ \"\"\"\n+ assert isinstance(message, (str, unicode)), message\n+ post_parameters = {'message': message}\n+ headers, data = self._requester.requestJsonAndCheck(\n+ \"PUT\",\n+ self.pull_request_url + \"/reviews/%s/dismissals\" % self.id,\n+ input=post_parameters\n+ )\n+\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n", "issue": "dismiss a PR review?\nAm I reading the docs correctly and understanding that there is no support for [dismissing a PR review](https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review)?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2017 Aaron Levine <[email protected]> #\n# Copyright 2017 Mike Miller <[email protected]> #\n# Copyright 2018 Darragh Bailey <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestReview(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestReviews. 
The reference can be found here https://developer.github.com/v3/pulls/reviews/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def state(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._state)\n return self._state.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def submitted_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._submitted_at)\n return self._submitted_at.value\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._state = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._submitted_at = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"state\" in attributes: # pragma no branch\n self._state = self._makeStringAttribute(attributes[\"state\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"submitted_at\" in attributes: # pragma no branch\n self._submitted_at = self._makeDatetimeAttribute(attributes[\"submitted_at\"])\n", "path": "github/PullRequestReview.py"}]}
2,078
235
gh_patches_debug_19909
rasdani/github-patches
git_diff
pyg-team__pytorch_geometric-5441
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5 ### 🐛 Describe the bug The main reason is in line 136 of tu_dataset.py it is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0 ### Environment * PyG version: * PyTorch version: * OS: * Python version: * CUDA/cuDNN version: * How you installed PyTorch and PyG (`conda`, `pip`, source): * Any other relevant information (*e.g.*, version of `torch-scatter`): </issue> <code> [start of torch_geometric/io/tu.py] 1 import glob 2 import os 3 import os.path as osp 4 5 import numpy as np 6 import torch 7 import torch.nn.functional as F 8 from torch_sparse import coalesce 9 10 from torch_geometric.data import Data 11 from torch_geometric.io import read_txt_array 12 from torch_geometric.utils import remove_self_loops 13 14 names = [ 15 'A', 'graph_indicator', 'node_labels', 'node_attributes' 16 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes' 17 ] 18 19 20 def read_tu_data(folder, prefix): 21 files = glob.glob(osp.join(folder, f'{prefix}_*.txt')) 22 names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files] 23 24 edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1 25 batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1 26 27 node_attributes = torch.empty((batch.size(0), 0)) 28 if 'node_attributes' in names: 29 node_attributes = read_file(folder, prefix, 'node_attributes') 30 31 node_labels = torch.empty((batch.size(0), 0)) 32 if 'node_labels' in names: 33 node_labels = read_file(folder, prefix, 'node_labels', torch.long) 34 if node_labels.dim() == 1: 35 node_labels = node_labels.unsqueeze(-1) 36 node_labels = node_labels - node_labels.min(dim=0)[0] 37 node_labels = node_labels.unbind(dim=-1) 38 node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels] 39 node_labels = torch.cat(node_labels, dim=-1).to(torch.float) 40 41 edge_attributes = torch.empty((edge_index.size(1), 0)) 42 if 'edge_attributes' in names: 43 edge_attributes = read_file(folder, prefix, 'edge_attributes') 44 45 edge_labels = torch.empty((edge_index.size(1), 0)) 46 if 'edge_labels' in names: 47 edge_labels = read_file(folder, prefix, 'edge_labels', torch.long) 48 if edge_labels.dim() == 1: 49 edge_labels = edge_labels.unsqueeze(-1) 50 edge_labels = edge_labels - edge_labels.min(dim=0)[0] 51 edge_labels = edge_labels.unbind(dim=-1) 52 edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels] 53 edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float) 54 55 x = cat([node_attributes, node_labels]) 56 edge_attr = cat([edge_attributes, edge_labels]) 57 58 y = None 59 if 'graph_attributes' in names: # Regression problem. 60 y = read_file(folder, prefix, 'graph_attributes') 61 elif 'graph_labels' in names: # Classification problem. 
62 y = read_file(folder, prefix, 'graph_labels', torch.long) 63 _, y = y.unique(sorted=True, return_inverse=True) 64 65 num_nodes = edge_index.max().item() + 1 if x is None else x.size(0) 66 edge_index, edge_attr = remove_self_loops(edge_index, edge_attr) 67 edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes, 68 num_nodes) 69 70 data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y) 71 data, slices = split(data, batch) 72 73 sizes = { 74 'num_node_attributes': node_attributes.size(-1), 75 'num_node_labels': node_labels.size(-1), 76 'num_edge_attributes': edge_attributes.size(-1), 77 'num_edge_labels': edge_labels.size(-1), 78 } 79 80 return data, slices, sizes 81 82 83 def read_file(folder, prefix, name, dtype=None): 84 path = osp.join(folder, f'{prefix}_{name}.txt') 85 return read_txt_array(path, sep=',', dtype=dtype) 86 87 88 def cat(seq): 89 seq = [item for item in seq if item is not None] 90 seq = [item for item in seq if item.numel() > 0] 91 seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq] 92 return torch.cat(seq, dim=-1) if len(seq) > 0 else None 93 94 95 def split(data, batch): 96 node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0) 97 node_slice = torch.cat([torch.tensor([0]), node_slice]) 98 99 row, _ = data.edge_index 100 edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0) 101 edge_slice = torch.cat([torch.tensor([0]), edge_slice]) 102 103 # Edge indices should start at zero for every graph. 104 data.edge_index -= node_slice[batch[row]].unsqueeze(0) 105 106 slices = {'edge_index': edge_slice} 107 if data.x is not None: 108 slices['x'] = node_slice 109 else: 110 # Imitate `collate` functionality: 111 data._num_nodes = torch.bincount(batch).tolist() 112 data.num_nodes = batch.numel() 113 if data.edge_attr is not None: 114 slices['edge_attr'] = edge_slice 115 if data.y is not None: 116 if data.y.size(0) == batch.size(0): 117 slices['y'] = node_slice 118 else: 119 slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long) 120 121 return data, slices 122 [end of torch_geometric/io/tu.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py --- a/torch_geometric/io/tu.py +++ b/torch_geometric/io/tu.py @@ -27,6 +27,8 @@ node_attributes = torch.empty((batch.size(0), 0)) if 'node_attributes' in names: node_attributes = read_file(folder, prefix, 'node_attributes') + if node_attributes.dim() == 1: + node_attributes = node_attributes.unsqueeze(-1) node_labels = torch.empty((batch.size(0), 0)) if 'node_labels' in names: @@ -41,6 +43,8 @@ edge_attributes = torch.empty((edge_index.size(1), 0)) if 'edge_attributes' in names: edge_attributes = read_file(folder, prefix, 'edge_attributes') + if edge_attributes.dim() == 1: + edge_attributes = edge_attributes.unsqueeze(-1) edge_labels = torch.empty((edge_index.size(1), 0)) if 'edge_labels' in names:
{"golden_diff": "diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py\n--- a/torch_geometric/io/tu.py\n+++ b/torch_geometric/io/tu.py\n@@ -27,6 +27,8 @@\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n+ if node_attributes.dim() == 1:\n+ node_attributes = node_attributes.unsqueeze(-1)\n \n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n@@ -41,6 +43,8 @@\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n+ if edge_attributes.dim() == 1:\n+ edge_attributes = edge_attributes.unsqueeze(-1)\n \n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n", "issue": "The the feature dim of data.x is zero in Proteins dataset with the pyg version after 2.0.5\n### \ud83d\udc1b Describe the bug\n\nThe main reason is in line 136 of tu_dataset.py\r\n\r\nit is strange that the value of num_edge_attributes is larger than the feature dimension of self.data.x in proteins, which leads to the resulting dimension of self.data.x is num_nodes*0\r\n\n\n### Environment\n\n* PyG version:\r\n* PyTorch version:\r\n* OS:\r\n* Python version:\r\n* CUDA/cuDNN version:\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source):\r\n* Any other relevant information (*e.g.*, version of `torch-scatter`):\r\n\n", "before_files": [{"content": "import glob\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.io import read_txt_array\nfrom torch_geometric.utils import remove_self_loops\n\nnames = [\n 'A', 'graph_indicator', 'node_labels', 'node_attributes'\n 'edge_labels', 'edge_attributes', 'graph_labels', 'graph_attributes'\n]\n\n\ndef read_tu_data(folder, prefix):\n files = glob.glob(osp.join(folder, f'{prefix}_*.txt'))\n names = [f.split(os.sep)[-1][len(prefix) + 1:-4] for f in files]\n\n edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1\n batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1\n\n node_attributes = torch.empty((batch.size(0), 0))\n if 'node_attributes' in names:\n node_attributes = read_file(folder, prefix, 'node_attributes')\n\n node_labels = torch.empty((batch.size(0), 0))\n if 'node_labels' in names:\n node_labels = read_file(folder, prefix, 'node_labels', torch.long)\n if node_labels.dim() == 1:\n node_labels = node_labels.unsqueeze(-1)\n node_labels = node_labels - node_labels.min(dim=0)[0]\n node_labels = node_labels.unbind(dim=-1)\n node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels]\n node_labels = torch.cat(node_labels, dim=-1).to(torch.float)\n\n edge_attributes = torch.empty((edge_index.size(1), 0))\n if 'edge_attributes' in names:\n edge_attributes = read_file(folder, prefix, 'edge_attributes')\n\n edge_labels = torch.empty((edge_index.size(1), 0))\n if 'edge_labels' in names:\n edge_labels = read_file(folder, prefix, 'edge_labels', torch.long)\n if edge_labels.dim() == 1:\n edge_labels = edge_labels.unsqueeze(-1)\n edge_labels = edge_labels - edge_labels.min(dim=0)[0]\n edge_labels = edge_labels.unbind(dim=-1)\n edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels]\n edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float)\n\n x = cat([node_attributes, node_labels])\n edge_attr = cat([edge_attributes, edge_labels])\n\n y 
= None\n if 'graph_attributes' in names: # Regression problem.\n y = read_file(folder, prefix, 'graph_attributes')\n elif 'graph_labels' in names: # Classification problem.\n y = read_file(folder, prefix, 'graph_labels', torch.long)\n _, y = y.unique(sorted=True, return_inverse=True)\n\n num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,\n num_nodes)\n\n data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)\n data, slices = split(data, batch)\n\n sizes = {\n 'num_node_attributes': node_attributes.size(-1),\n 'num_node_labels': node_labels.size(-1),\n 'num_edge_attributes': edge_attributes.size(-1),\n 'num_edge_labels': edge_labels.size(-1),\n }\n\n return data, slices, sizes\n\n\ndef read_file(folder, prefix, name, dtype=None):\n path = osp.join(folder, f'{prefix}_{name}.txt')\n return read_txt_array(path, sep=',', dtype=dtype)\n\n\ndef cat(seq):\n seq = [item for item in seq if item is not None]\n seq = [item for item in seq if item.numel() > 0]\n seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq]\n return torch.cat(seq, dim=-1) if len(seq) > 0 else None\n\n\ndef split(data, batch):\n node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)\n node_slice = torch.cat([torch.tensor([0]), node_slice])\n\n row, _ = data.edge_index\n edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)\n edge_slice = torch.cat([torch.tensor([0]), edge_slice])\n\n # Edge indices should start at zero for every graph.\n data.edge_index -= node_slice[batch[row]].unsqueeze(0)\n\n slices = {'edge_index': edge_slice}\n if data.x is not None:\n slices['x'] = node_slice\n else:\n # Imitate `collate` functionality:\n data._num_nodes = torch.bincount(batch).tolist()\n data.num_nodes = batch.numel()\n if data.edge_attr is not None:\n slices['edge_attr'] = edge_slice\n if data.y is not None:\n if data.y.size(0) == batch.size(0):\n slices['y'] = node_slice\n else:\n slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)\n\n return data, slices\n", "path": "torch_geometric/io/tu.py"}]}
2,127
240
gh_patches_debug_9496
rasdani/github-patches
git_diff
rotki__rotki-2260
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Staked Cream price appears incorrectly In version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's. The previous version of Rotki was showing the price correctly but the logo was still incorrect. I think cryptocompare is used as price oracle for CREAM. </issue> <code> [start of rotkehlchen/icons.py] 1 import itertools 2 import logging 3 from pathlib import Path 4 from typing import Optional, Set 5 6 import gevent 7 import requests 8 from typing_extensions import Literal 9 10 from rotkehlchen.assets.asset import Asset 11 from rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping 12 from rotkehlchen.errors import RemoteError 13 from rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS 14 from rotkehlchen.typing import AssetType 15 from rotkehlchen.utils.hashing import file_md5 16 from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE 17 18 log = logging.getLogger(__name__) 19 20 21 class IconManager(): 22 """ 23 Manages the icons for all the assets of the application 24 25 The get_icon() and the periodic task of query_uncached_icons_batch() may at 26 a point query the same icon but that's fine and not worth of locking mechanism as 27 it should be rather rare and worst case scenario once in a blue moon we waste 28 an API call. In the end the right file would be written on disk. 29 """ 30 31 def __init__(self, data_dir: Path, coingecko: Coingecko) -> None: 32 self.icons_dir = data_dir / 'icons' 33 self.coingecko = coingecko 34 self.icons_dir.mkdir(parents=True, exist_ok=True) 35 self.failed_assets: Set[Asset] = set() 36 37 def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path: 38 return self.icons_dir / f'{asset.identifier}_{size}.png' 39 40 def iconfile_md5( 41 self, 42 asset: Asset, 43 size: Literal['thumb', 'small', 'large'], 44 ) -> Optional[str]: 45 path = self.iconfile_path(asset, size) 46 if not path.is_file(): 47 return None 48 49 return file_md5(path) 50 51 def _query_coingecko_for_icon(self, asset: Asset) -> bool: 52 """Queries coingecko for icons of an asset 53 54 If query was okay it returns True, else False 55 """ 56 # Do not bother querying if asset is delisted. Nothing is returned. 57 # we only keep delisted asset coingecko mappings since historical prices 58 # can still be queried. 59 if asset.identifier in DELISTED_ASSETS: 60 self.failed_assets.add(asset) 61 return False 62 63 try: 64 data = self.coingecko.asset_data(asset) 65 except RemoteError as e: 66 log.warning( 67 f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}', 68 ) 69 # If a query fails (99% of fails will be 404s) don't repeat them 70 self.failed_assets.add(asset) 71 return False 72 73 for size in ('thumb', 'small', 'large'): 74 url = getattr(data.images, size) 75 try: 76 response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE) 77 except requests.exceptions.RequestException: 78 # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370 79 continue 80 81 with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore 82 f.write(response.content) 83 84 return True 85 86 def get_icon( 87 self, 88 asset: Asset, given_size: Literal['thumb', 'small', 'large'], 89 ) -> Optional[bytes]: 90 """Returns the byte data of the requested icon 91 92 If the icon can't be found it returns None. 
93 94 If the icon is found cached locally it's returned directly. 95 96 If not, all icons of the asset are queried from coingecko and cached 97 locally before the requested data are returned. 98 """ 99 if not asset.has_coingecko(): 100 return None 101 102 needed_path = self.iconfile_path(asset, given_size) 103 if needed_path.is_file(): 104 with open(needed_path, 'rb') as f: 105 image_data = f.read() 106 return image_data 107 108 # else query coingecko for the icons and cache all of them 109 if self._query_coingecko_for_icon(asset) is False: 110 return None 111 112 if not needed_path.is_file(): 113 return None 114 115 with open(needed_path, 'rb') as f: 116 image_data = f.read() 117 return image_data 118 119 def query_uncached_icons_batch(self, batch_size: int) -> bool: 120 """Queries a batch of uncached icons for assets 121 122 Returns true if there is more icons left to cache after this batch. 123 """ 124 coingecko_integrated_assets = [] 125 126 for identifier, asset_data in AssetResolver().assets.items(): 127 asset_type = asset_type_mapping[asset_data['type']] 128 if asset_type != AssetType.FIAT and asset_data['coingecko'] != '': 129 coingecko_integrated_assets.append(identifier) 130 131 cached_assets = [ 132 str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file() 133 ] 134 uncached_assets = ( 135 set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets 136 ) 137 log.info( 138 f'Periodic task to query coingecko for {batch_size} uncached asset icons. ' 139 f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}', 140 ) 141 for asset_name in itertools.islice(uncached_assets, batch_size): 142 self._query_coingecko_for_icon(Asset(asset_name)) 143 144 return len(uncached_assets) > batch_size 145 146 def periodically_query_icons_until_all_cached( 147 self, 148 batch_size: int, 149 sleep_time_secs: float, 150 ) -> None: 151 """Periodically query all uncached icons until we have icons cached for all 152 of the known assets that have coingecko integration""" 153 if batch_size == 0: 154 return 155 156 while True: 157 carry_on = self.query_uncached_icons_batch(batch_size=batch_size) 158 if not carry_on: 159 break 160 gevent.sleep(sleep_time_secs) 161 [end of rotkehlchen/icons.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py --- a/rotkehlchen/icons.py +++ b/rotkehlchen/icons.py @@ -26,7 +26,7 @@ a point query the same icon but that's fine and not worth of locking mechanism as it should be rather rare and worst case scenario once in a blue moon we waste an API call. In the end the right file would be written on disk. -""" + """ def __init__(self, data_dir: Path, coingecko: Coingecko) -> None: self.icons_dir = data_dir / 'icons'
{"golden_diff": "diff --git a/rotkehlchen/icons.py b/rotkehlchen/icons.py\n--- a/rotkehlchen/icons.py\n+++ b/rotkehlchen/icons.py\n@@ -26,7 +26,7 @@\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n-\"\"\"\n+ \"\"\"\n \n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n", "issue": "Staked Cream price appears incorrectly\nIn version 1.13.0 of Rotki the staked Cream price and logo appear incorrectly. The CRM's price and logo show up instead of CREAM's.\r\n\r\nThe previous version of Rotki was showing the price correctly but the logo was still incorrect.\r\n\r\nI think cryptocompare is used as price oracle for CREAM.\n", "before_files": [{"content": "import itertools\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Set\n\nimport gevent\nimport requests\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.resolver import AssetResolver, asset_type_mapping\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.coingecko import Coingecko, DELISTED_ASSETS\nfrom rotkehlchen.typing import AssetType\nfrom rotkehlchen.utils.hashing import file_md5\nfrom rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE\n\nlog = logging.getLogger(__name__)\n\n\nclass IconManager():\n \"\"\"\n Manages the icons for all the assets of the application\n\n The get_icon() and the periodic task of query_uncached_icons_batch() may at\n a point query the same icon but that's fine and not worth of locking mechanism as\n it should be rather rare and worst case scenario once in a blue moon we waste\n an API call. In the end the right file would be written on disk.\n\"\"\"\n\n def __init__(self, data_dir: Path, coingecko: Coingecko) -> None:\n self.icons_dir = data_dir / 'icons'\n self.coingecko = coingecko\n self.icons_dir.mkdir(parents=True, exist_ok=True)\n self.failed_assets: Set[Asset] = set()\n\n def iconfile_path(self, asset: Asset, size: Literal['thumb', 'small', 'large']) -> Path:\n return self.icons_dir / f'{asset.identifier}_{size}.png'\n\n def iconfile_md5(\n self,\n asset: Asset,\n size: Literal['thumb', 'small', 'large'],\n ) -> Optional[str]:\n path = self.iconfile_path(asset, size)\n if not path.is_file():\n return None\n\n return file_md5(path)\n\n def _query_coingecko_for_icon(self, asset: Asset) -> bool:\n \"\"\"Queries coingecko for icons of an asset\n\n If query was okay it returns True, else False\n \"\"\"\n # Do not bother querying if asset is delisted. 
Nothing is returned.\n # we only keep delisted asset coingecko mappings since historical prices\n # can still be queried.\n if asset.identifier in DELISTED_ASSETS:\n self.failed_assets.add(asset)\n return False\n\n try:\n data = self.coingecko.asset_data(asset)\n except RemoteError as e:\n log.warning(\n f'Problem querying coingecko for asset data of {asset.identifier}: {str(e)}',\n )\n # If a query fails (99% of fails will be 404s) don't repeat them\n self.failed_assets.add(asset)\n return False\n\n for size in ('thumb', 'small', 'large'):\n url = getattr(data.images, size)\n try:\n response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)\n except requests.exceptions.RequestException:\n # Any problem getting the image skip it: https://github.com/rotki/rotki/issues/1370\n continue\n\n with open(self.iconfile_path(asset, size), 'wb') as f: # type: ignore\n f.write(response.content)\n\n return True\n\n def get_icon(\n self,\n asset: Asset, given_size: Literal['thumb', 'small', 'large'],\n ) -> Optional[bytes]:\n \"\"\"Returns the byte data of the requested icon\n\n If the icon can't be found it returns None.\n\n If the icon is found cached locally it's returned directly.\n\n If not, all icons of the asset are queried from coingecko and cached\n locally before the requested data are returned.\n \"\"\"\n if not asset.has_coingecko():\n return None\n\n needed_path = self.iconfile_path(asset, given_size)\n if needed_path.is_file():\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n # else query coingecko for the icons and cache all of them\n if self._query_coingecko_for_icon(asset) is False:\n return None\n\n if not needed_path.is_file():\n return None\n\n with open(needed_path, 'rb') as f:\n image_data = f.read()\n return image_data\n\n def query_uncached_icons_batch(self, batch_size: int) -> bool:\n \"\"\"Queries a batch of uncached icons for assets\n\n Returns true if there is more icons left to cache after this batch.\n \"\"\"\n coingecko_integrated_assets = []\n\n for identifier, asset_data in AssetResolver().assets.items():\n asset_type = asset_type_mapping[asset_data['type']]\n if asset_type != AssetType.FIAT and asset_data['coingecko'] != '':\n coingecko_integrated_assets.append(identifier)\n\n cached_assets = [\n str(x.name)[:-10] for x in self.icons_dir.glob('*_thumb.png') if x.is_file()\n ]\n uncached_assets = (\n set(coingecko_integrated_assets) - set(cached_assets) - self.failed_assets\n )\n log.info(\n f'Periodic task to query coingecko for {batch_size} uncached asset icons. '\n f'Uncached assets: {len(uncached_assets)}. Cached assets: {len(cached_assets)}',\n )\n for asset_name in itertools.islice(uncached_assets, batch_size):\n self._query_coingecko_for_icon(Asset(asset_name))\n\n return len(uncached_assets) > batch_size\n\n def periodically_query_icons_until_all_cached(\n self,\n batch_size: int,\n sleep_time_secs: float,\n ) -> None:\n \"\"\"Periodically query all uncached icons until we have icons cached for all\n of the known assets that have coingecko integration\"\"\"\n if batch_size == 0:\n return\n\n while True:\n carry_on = self.query_uncached_icons_batch(batch_size=batch_size)\n if not carry_on:\n break\n gevent.sleep(sleep_time_secs)\n", "path": "rotkehlchen/icons.py"}]}
2,315
146
gh_patches_debug_4092
rasdani/github-patches
git_diff
zulip__zulip-27536
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document ability to drag-and-drop anywhere to upload a file We should document the functionality introduced in #14579 / https://github.com/zulip/zulip/pull/26041. I would probably replace the "Via Markdown" tab with "Via drag-and-drop", and modify the instructions to explain that you can drag anywhere, whether or not the compose box is open. </issue> <code> [start of zerver/lib/markdown/tabbed_sections.py] 1 import re 2 from typing import Any, Dict, List, Mapping, Optional 3 4 import markdown 5 from markdown.extensions import Extension 6 from markdown.preprocessors import Preprocessor 7 from typing_extensions import override 8 9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES 10 11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$") 12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$") 13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$") 14 15 TABBED_SECTION_TEMPLATE = """ 16 <div class="tabbed-section {tab_class}" markdown="1"> 17 {nav_bar} 18 <div class="blocks"> 19 {blocks} 20 </div> 21 </div> 22 """.strip() 23 24 NAV_BAR_TEMPLATE = """ 25 <ul class="nav"> 26 {tabs} 27 </ul> 28 """.strip() 29 30 NAV_LIST_ITEM_TEMPLATE = """ 31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li> 32 """.strip() 33 34 DIV_TAB_CONTENT_TEMPLATE = """ 35 <div data-tab-key="{data_tab_key}" markdown="1"> 36 {content} 37 </div> 38 """.strip() 39 40 # If adding new entries here, also check if you need to update 41 # tabbed-instructions.js 42 TAB_SECTION_LABELS = { 43 "desktop-web": "Desktop/Web", 44 "ios": "iOS", 45 "android": "Android", 46 "mac": "macOS", 47 "windows": "Windows", 48 "linux": "Linux", 49 "python": "Python", 50 "js": "JavaScript", 51 "curl": "curl", 52 "zulip-send": "zulip-send", 53 "web": "Web", 54 "desktop": "Desktop", 55 "mobile": "Mobile", 56 "mm-default": "Default installation", 57 "mm-cloud": "Cloud instance", 58 "mm-docker": "Docker", 59 "mm-gitlab-omnibus": "GitLab Omnibus", 60 "mm-self-hosting-cloud-export": "Self hosting (cloud export)", 61 "require-invitations": "Require invitations", 62 "allow-anyone-to-join": "Allow anyone to join", 63 "restrict-by-email-domain": "Restrict by email domain", 64 "zoom": "Zoom", 65 "jitsi-meet": "Jitsi Meet", 66 "bigbluebutton": "BigBlueButton", 67 "disable": "Disabled", 68 "chrome": "Chrome", 69 "firefox": "Firefox", 70 "desktop-app": "Desktop app", 71 "system-proxy-settings": "System proxy settings", 72 "custom-proxy-settings": "Custom proxy settings", 73 "stream": "From a stream view", 74 "not-stream": "From other views", 75 "via-recent-conversations": "Via recent conversations", 76 "via-inbox-view": "Via inbox view", 77 "via-left-sidebar": "Via left sidebar", 78 "instructions-for-all-platforms": "Instructions for all platforms", 79 "public-streams": "Public streams", 80 "private-streams": "Private streams", 81 "web-public-streams": "Web-public streams", 82 "via-user-card": "Via user card", 83 "via-user-profile": "Via user profile", 84 "via-organization-settings": "Via organization settings", 85 "via-personal-settings": "Via personal settings", 86 "via-stream-settings": "Via stream settings", 87 "default-subdomain": "Default subdomain", 88 "custom-subdomain": "Custom subdomain", 89 "zulip-cloud": "Zulip Cloud", 90 "self-hosting": "Self hosting", 91 "okta": "Okta", 92 "onelogin": "OneLogin", 93 "azuread": "AzureAD", 94 "keycloak": "Keycloak", 95 "auth0": "Auth0", 96 "logged-in": "If you are logged 
in", 97 "logged-out": "If you are logged out", 98 "user": "User", 99 "bot": "Bot", 100 "on-sign-up": "On sign-up", 101 "via-paste": "Via paste", 102 "via-markdown": "Via Markdown", 103 "via-compose-box-buttons": "Via compose box buttons", 104 "stream-compose": "Compose to a stream", 105 "dm-compose": "Compose a DM", 106 "v6": "Zulip Server 6.0+", 107 "v4": "Zulip Server 4.0+", 108 } 109 110 111 class TabbedSectionsGenerator(Extension): 112 @override 113 def extendMarkdown(self, md: markdown.Markdown) -> None: 114 md.preprocessors.register( 115 TabbedSectionsPreprocessor(md, self.getConfigs()), 116 "tabbed_sections", 117 PREPROCESSOR_PRIORITES["tabbed_sections"], 118 ) 119 120 121 class TabbedSectionsPreprocessor(Preprocessor): 122 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None: 123 super().__init__(md) 124 125 @override 126 def run(self, lines: List[str]) -> List[str]: 127 tab_section = self.parse_tabs(lines) 128 while tab_section: 129 if "tabs" in tab_section: 130 tab_class = "has-tabs" 131 else: 132 tab_class = "no-tabs" 133 tab_section["tabs"] = [ 134 { 135 "tab_key": "instructions-for-all-platforms", 136 "start": tab_section["start_tabs_index"], 137 } 138 ] 139 nav_bar = self.generate_nav_bar(tab_section) 140 content_blocks = self.generate_content_blocks(tab_section, lines) 141 rendered_tabs = TABBED_SECTION_TEMPLATE.format( 142 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks 143 ) 144 145 start = tab_section["start_tabs_index"] 146 end = tab_section["end_tabs_index"] + 1 147 lines = [*lines[:start], rendered_tabs, *lines[end:]] 148 tab_section = self.parse_tabs(lines) 149 return lines 150 151 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str: 152 tab_content_blocks = [] 153 for index, tab in enumerate(tab_section["tabs"]): 154 start_index = tab["start"] + 1 155 try: 156 # If there are more tabs, we can use the starting index 157 # of the next tab as the ending index of the previous one 158 end_index = tab_section["tabs"][index + 1]["start"] 159 except IndexError: 160 # Otherwise, just use the end of the entire section 161 end_index = tab_section["end_tabs_index"] 162 163 content = "\n".join(lines[start_index:end_index]).strip() 164 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format( 165 data_tab_key=tab["tab_key"], 166 # Wrapping the content in two newlines is necessary here. 167 # If we don't do this, the inner Markdown does not get 168 # rendered properly. 
169 content=f"\n{content}\n", 170 ) 171 tab_content_blocks.append(tab_content_block) 172 return "\n".join(tab_content_blocks) 173 174 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str: 175 li_elements = [] 176 for tab in tab_section["tabs"]: 177 tab_key = tab.get("tab_key") 178 tab_label = TAB_SECTION_LABELS.get(tab_key) 179 if tab_label is None: 180 raise ValueError( 181 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py" 182 ) 183 184 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label) 185 li_elements.append(li) 186 187 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements)) 188 189 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]: 190 block: Dict[str, Any] = {} 191 for index, line in enumerate(lines): 192 start_match = START_TABBED_SECTION_REGEX.search(line) 193 if start_match: 194 block["start_tabs_index"] = index 195 196 tab_content_match = TAB_CONTENT_REGEX.search(line) 197 if tab_content_match: 198 block.setdefault("tabs", []) 199 tab = {"start": index, "tab_key": tab_content_match.group(1)} 200 block["tabs"].append(tab) 201 202 end_match = END_TABBED_SECTION_REGEX.search(line) 203 if end_match: 204 block["end_tabs_index"] = index 205 break 206 return block 207 208 209 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator: 210 return TabbedSectionsGenerator(**kwargs) 211 [end of zerver/lib/markdown/tabbed_sections.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py --- a/zerver/lib/markdown/tabbed_sections.py +++ b/zerver/lib/markdown/tabbed_sections.py @@ -99,6 +99,7 @@ "bot": "Bot", "on-sign-up": "On sign-up", "via-paste": "Via paste", + "via-drag-and-drop": "Via drag-and-drop", "via-markdown": "Via Markdown", "via-compose-box-buttons": "Via compose box buttons", "stream-compose": "Compose to a stream",
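To see why the one-line dictionary addition is required, recall from the file above that `generate_nav_bar` raises a `ValueError` whenever a `{tab|...}` marker uses a key that is missing from `TAB_SECTION_LABELS`. The sketch below reuses only the regex and templates shown in that file to trace the new key through the preprocessor; the input line is an illustrative marker, not text taken from Zulip's help-center articles.

```python
import re

# Pieces reproduced from zerver/lib/markdown/tabbed_sections.py above.
TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")
NAV_LIST_ITEM_TEMPLATE = '<li data-tab-key="{data_tab_key}" tabindex="0">{label}</li>'
TAB_SECTION_LABELS = {"via-drag-and-drop": "Via drag-and-drop"}  # entry added by the patch

line = "{tab|via-drag-and-drop}"        # hypothetical marker in a help article
match = TAB_CONTENT_REGEX.search(line)
assert match is not None
tab_key = match.group(1)                # "via-drag-and-drop"
label = TAB_SECTION_LABELS[tab_key]     # an unregistered key has no label and cannot render
print(NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=label))
# <li data-tab-key="via-drag-and-drop" tabindex="0">Via drag-and-drop</li>
```

The instructions shown under the new tab live in the help-center Markdown articles, which are outside this record; the label mapping is the only change this record's patch makes.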
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -99,6 +99,7 @@\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n+ \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n", "issue": "Document ability to drag-and-drop anywhere to upload a file\nWe should document the functionality introduced in #14579 / https://github.com/zulip/zulip/pull/26041.\r\n\r\nI would probably replace the \"Via Markdown\" tab with \"Via drag-and-drop\", and modify the instructions to explain that you can drag anywhere, whether or not the compose box is open.\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": 
\"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return 
NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
num_tokens_prompt: 2,957
num_tokens_diff: 137
problem_id: gh_patches_debug_30301
source: rasdani/github-patches
task_type: git_diff
in_source_id: napari__napari-4445
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> npe2 plugins need to be added to `napari --info` ## 🐛 Bug currently, `napari --info` doesn't include npe2 plugins </issue> <code> [start of napari/utils/info.py] 1 import os 2 import platform 3 import subprocess 4 import sys 5 6 import napari 7 8 OS_RELEASE_PATH = "/etc/os-release" 9 10 11 def _linux_sys_name(): 12 """ 13 Try to discover linux system name base on /etc/os-release file or lsb_release command output 14 https://www.freedesktop.org/software/systemd/man/os-release.html 15 """ 16 if os.path.exists(OS_RELEASE_PATH): 17 with open(OS_RELEASE_PATH) as f_p: 18 data = {} 19 for line in f_p: 20 field, value = line.split("=") 21 data[field.strip()] = value.strip().strip('"') 22 if "PRETTY_NAME" in data: 23 return data["PRETTY_NAME"] 24 if "NAME" in data: 25 if "VERSION" in data: 26 return f'{data["NAME"]} {data["VERSION"]}' 27 if "VERSION_ID" in data: 28 return f'{data["NAME"]} {data["VERSION_ID"]}' 29 return f'{data["NAME"]} (no version)' 30 31 try: 32 res = subprocess.run( 33 ["lsb_release", "-d", "-r"], check=True, capture_output=True 34 ) 35 text = res.stdout.decode() 36 data = {} 37 for line in text.split("\n"): 38 key, val = line.split(":") 39 data[key.strip()] = val.strip() 40 version_str = data["Description"] 41 if not version_str.endswith(data["Release"]): 42 version_str += " " + data["Release"] 43 return version_str 44 except subprocess.CalledProcessError: 45 pass 46 return "" 47 48 49 def _sys_name(): 50 """ 51 Discover MacOS or Linux Human readable information. For Linux provide information about distribution. 52 """ 53 try: 54 if sys.platform == "linux": 55 return _linux_sys_name() 56 if sys.platform == "darwin": 57 try: 58 res = subprocess.run( 59 ["sw_vers", "-productVersion"], 60 check=True, 61 capture_output=True, 62 ) 63 return f"MacOS {res.stdout.decode().strip()}" 64 except subprocess.CalledProcessError: 65 pass 66 except Exception: 67 pass 68 return "" 69 70 71 def sys_info(as_html=False): 72 """Gathers relevant module versions for troubleshooting purposes. 
73 74 Parameters 75 ---------- 76 as_html : bool 77 if True, info will be returned as HTML, suitable for a QTextEdit widget 78 """ 79 from napari.plugins import plugin_manager 80 81 sys_version = sys.version.replace('\n', ' ') 82 text = ( 83 f"<b>napari</b>: {napari.__version__}<br>" 84 f"<b>Platform</b>: {platform.platform()}<br>" 85 ) 86 87 __sys_name = _sys_name() 88 if __sys_name: 89 text += f"<b>System</b>: {__sys_name}<br>" 90 91 text += f"<b>Python</b>: {sys_version}<br>" 92 93 try: 94 from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore 95 96 if API_NAME == 'PySide2': 97 API_VERSION = PYSIDE_VERSION 98 elif API_NAME == 'PyQt5': 99 API_VERSION = PYQT_VERSION 100 else: 101 API_VERSION = '' 102 103 text += ( 104 f"<b>Qt</b>: {QtCore.__version__}<br>" 105 f"<b>{API_NAME}</b>: {API_VERSION}<br>" 106 ) 107 108 except Exception as e: 109 text += f"<b>Qt</b>: Import failed ({e})<br>" 110 111 modules = ( 112 ('numpy', 'NumPy'), 113 ('scipy', 'SciPy'), 114 ('dask', 'Dask'), 115 ('vispy', 'VisPy'), 116 ) 117 118 loaded = {} 119 for module, name in modules: 120 try: 121 loaded[module] = __import__(module) 122 text += f"<b>{name}</b>: {loaded[module].__version__}<br>" 123 except Exception as e: 124 text += f"<b>{name}</b>: Import failed ({e})<br>" 125 126 text += "<br><b>OpenGL:</b><br>" 127 128 if loaded.get('vispy', False): 129 sys_info_text = ( 130 "<br>".join( 131 [ 132 loaded['vispy'].sys_info().split("\n")[index] 133 for index in [-4, -3] 134 ] 135 ) 136 .replace("'", "") 137 .replace("<br>", "<br> - ") 138 ) 139 text += f' - {sys_info_text}<br>' 140 else: 141 text += " - failed to load vispy" 142 143 text += "<br><b>Screens:</b><br>" 144 145 try: 146 from qtpy.QtGui import QGuiApplication 147 148 screen_list = QGuiApplication.screens() 149 for i, screen in enumerate(screen_list, start=1): 150 text += f" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>" 151 except Exception as e: 152 text += f" - failed to load screen information {e}" 153 154 plugin_manager.discover() 155 plugin_strings = [] 156 for meta in plugin_manager.list_plugin_metadata(): 157 plugin_name = meta.get('plugin_name') 158 if plugin_name == 'builtins': 159 continue 160 version = meta.get('version') 161 version_string = f": {version}" if version else "" 162 plugin_strings.append(f" - {plugin_name}{version_string}") 163 text += '<br><b>Plugins</b>:' 164 text += ( 165 ("<br>" + "<br>".join(sorted(plugin_strings))) 166 if plugin_strings 167 else ' None' 168 ) 169 170 if not as_html: 171 text = ( 172 text.replace("<br>", "\n").replace("<b>", "").replace("</b>", "") 173 ) 174 return text 175 176 177 citation_text = ( 178 'napari contributors (2019). napari: a ' 179 'multi-dimensional image viewer for python. ' 180 'doi:10.5281/zenodo.3555620' 181 ) 182 [end of napari/utils/info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/utils/info.py b/napari/utils/info.py --- a/napari/utils/info.py +++ b/napari/utils/info.py @@ -76,6 +76,8 @@ as_html : bool if True, info will be returned as HTML, suitable for a QTextEdit widget """ + from npe2 import PluginManager as Npe2PluginManager + from napari.plugins import plugin_manager sys_version = sys.version.replace('\n', ' ') @@ -152,17 +154,27 @@ text += f" - failed to load screen information {e}" plugin_manager.discover() - plugin_strings = [] + plugin_strings = {} for meta in plugin_manager.list_plugin_metadata(): plugin_name = meta.get('plugin_name') if plugin_name == 'builtins': continue version = meta.get('version') version_string = f": {version}" if version else "" - plugin_strings.append(f" - {plugin_name}{version_string}") + plugin_strings[plugin_name] = f" - {plugin_name}{version_string}" + + npe2_plugin_manager = Npe2PluginManager.instance() + for manifest in npe2_plugin_manager.iter_manifests(): + plugin_name = manifest.name + if plugin_name in ("napari", "builtins"): + continue + version = manifest.package_version + version_string = f": {version}" if version else "" + plugin_strings[plugin_name] = f" - {plugin_name}{version_string}" + text += '<br><b>Plugins</b>:' text += ( - ("<br>" + "<br>".join(sorted(plugin_strings))) + ("<br>" + "<br>".join(sorted(plugin_strings.values()))) if plugin_strings else ' None' )
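The npe2 half of the patched `sys_info` can be read as a standalone helper. The sketch below uses only the npe2 calls that appear in the diff (`PluginManager.instance()`, `iter_manifests()`, `manifest.name`, `manifest.package_version`) and is meant as an illustration of the lookup rather than the exact napari code.

```python
from npe2 import PluginManager


def npe2_plugin_lines():
    """Collect ' - name: version' lines for installed npe2 plugins."""
    plugin_strings = {}
    for manifest in PluginManager.instance().iter_manifests():
        name = manifest.name
        if name in ("napari", "builtins"):  # skip built-in manifests, as the patch does
            continue
        version = manifest.package_version
        version_string = f": {version}" if version else ""
        plugin_strings[name] = f" - {name}{version_string}"
    return sorted(plugin_strings.values())
```

Because the patch also switches `plugin_strings` from a list to a dict keyed by plugin name, an npe2 manifest overwrites any npe1 entry registered under the same name, so a plugin exposing both interfaces is listed only once in the report.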
{"golden_diff": "diff --git a/napari/utils/info.py b/napari/utils/info.py\n--- a/napari/utils/info.py\n+++ b/napari/utils/info.py\n@@ -76,6 +76,8 @@\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n+ from npe2 import PluginManager as Npe2PluginManager\n+\n from napari.plugins import plugin_manager\n \n sys_version = sys.version.replace('\\n', ' ')\n@@ -152,17 +154,27 @@\n text += f\" - failed to load screen information {e}\"\n \n plugin_manager.discover()\n- plugin_strings = []\n+ plugin_strings = {}\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n- plugin_strings.append(f\" - {plugin_name}{version_string}\")\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n+ npe2_plugin_manager = Npe2PluginManager.instance()\n+ for manifest in npe2_plugin_manager.iter_manifests():\n+ plugin_name = manifest.name\n+ if plugin_name in (\"napari\", \"builtins\"):\n+ continue\n+ version = manifest.package_version\n+ version_string = f\": {version}\" if version else \"\"\n+ plugin_strings[plugin_name] = f\" - {plugin_name}{version_string}\"\n+\n text += '<br><b>Plugins</b>:'\n text += (\n- (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n+ (\"<br>\" + \"<br>\".join(sorted(plugin_strings.values())))\n if plugin_strings\n else ' None'\n )\n", "issue": "npe2 plugins need to be added to `napari --info` \n## \ud83d\udc1b Bug\r\ncurrently, `napari --info` doesn't include npe2 plugins\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nimport sys\n\nimport napari\n\nOS_RELEASE_PATH = \"/etc/os-release\"\n\n\ndef _linux_sys_name():\n \"\"\"\n Try to discover linux system name base on /etc/os-release file or lsb_release command output\n https://www.freedesktop.org/software/systemd/man/os-release.html\n \"\"\"\n if os.path.exists(OS_RELEASE_PATH):\n with open(OS_RELEASE_PATH) as f_p:\n data = {}\n for line in f_p:\n field, value = line.split(\"=\")\n data[field.strip()] = value.strip().strip('\"')\n if \"PRETTY_NAME\" in data:\n return data[\"PRETTY_NAME\"]\n if \"NAME\" in data:\n if \"VERSION\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION\"]}'\n if \"VERSION_ID\" in data:\n return f'{data[\"NAME\"]} {data[\"VERSION_ID\"]}'\n return f'{data[\"NAME\"]} (no version)'\n\n try:\n res = subprocess.run(\n [\"lsb_release\", \"-d\", \"-r\"], check=True, capture_output=True\n )\n text = res.stdout.decode()\n data = {}\n for line in text.split(\"\\n\"):\n key, val = line.split(\":\")\n data[key.strip()] = val.strip()\n version_str = data[\"Description\"]\n if not version_str.endswith(data[\"Release\"]):\n version_str += \" \" + data[\"Release\"]\n return version_str\n except subprocess.CalledProcessError:\n pass\n return \"\"\n\n\ndef _sys_name():\n \"\"\"\n Discover MacOS or Linux Human readable information. 
For Linux provide information about distribution.\n \"\"\"\n try:\n if sys.platform == \"linux\":\n return _linux_sys_name()\n if sys.platform == \"darwin\":\n try:\n res = subprocess.run(\n [\"sw_vers\", \"-productVersion\"],\n check=True,\n capture_output=True,\n )\n return f\"MacOS {res.stdout.decode().strip()}\"\n except subprocess.CalledProcessError:\n pass\n except Exception:\n pass\n return \"\"\n\n\ndef sys_info(as_html=False):\n \"\"\"Gathers relevant module versions for troubleshooting purposes.\n\n Parameters\n ----------\n as_html : bool\n if True, info will be returned as HTML, suitable for a QTextEdit widget\n \"\"\"\n from napari.plugins import plugin_manager\n\n sys_version = sys.version.replace('\\n', ' ')\n text = (\n f\"<b>napari</b>: {napari.__version__}<br>\"\n f\"<b>Platform</b>: {platform.platform()}<br>\"\n )\n\n __sys_name = _sys_name()\n if __sys_name:\n text += f\"<b>System</b>: {__sys_name}<br>\"\n\n text += f\"<b>Python</b>: {sys_version}<br>\"\n\n try:\n from qtpy import API_NAME, PYQT_VERSION, PYSIDE_VERSION, QtCore\n\n if API_NAME == 'PySide2':\n API_VERSION = PYSIDE_VERSION\n elif API_NAME == 'PyQt5':\n API_VERSION = PYQT_VERSION\n else:\n API_VERSION = ''\n\n text += (\n f\"<b>Qt</b>: {QtCore.__version__}<br>\"\n f\"<b>{API_NAME}</b>: {API_VERSION}<br>\"\n )\n\n except Exception as e:\n text += f\"<b>Qt</b>: Import failed ({e})<br>\"\n\n modules = (\n ('numpy', 'NumPy'),\n ('scipy', 'SciPy'),\n ('dask', 'Dask'),\n ('vispy', 'VisPy'),\n )\n\n loaded = {}\n for module, name in modules:\n try:\n loaded[module] = __import__(module)\n text += f\"<b>{name}</b>: {loaded[module].__version__}<br>\"\n except Exception as e:\n text += f\"<b>{name}</b>: Import failed ({e})<br>\"\n\n text += \"<br><b>OpenGL:</b><br>\"\n\n if loaded.get('vispy', False):\n sys_info_text = (\n \"<br>\".join(\n [\n loaded['vispy'].sys_info().split(\"\\n\")[index]\n for index in [-4, -3]\n ]\n )\n .replace(\"'\", \"\")\n .replace(\"<br>\", \"<br> - \")\n )\n text += f' - {sys_info_text}<br>'\n else:\n text += \" - failed to load vispy\"\n\n text += \"<br><b>Screens:</b><br>\"\n\n try:\n from qtpy.QtGui import QGuiApplication\n\n screen_list = QGuiApplication.screens()\n for i, screen in enumerate(screen_list, start=1):\n text += f\" - screen {i}: resolution {screen.geometry().width()}x{screen.geometry().height()}, scale {screen.devicePixelRatio()}<br>\"\n except Exception as e:\n text += f\" - failed to load screen information {e}\"\n\n plugin_manager.discover()\n plugin_strings = []\n for meta in plugin_manager.list_plugin_metadata():\n plugin_name = meta.get('plugin_name')\n if plugin_name == 'builtins':\n continue\n version = meta.get('version')\n version_string = f\": {version}\" if version else \"\"\n plugin_strings.append(f\" - {plugin_name}{version_string}\")\n text += '<br><b>Plugins</b>:'\n text += (\n (\"<br>\" + \"<br>\".join(sorted(plugin_strings)))\n if plugin_strings\n else ' None'\n )\n\n if not as_html:\n text = (\n text.replace(\"<br>\", \"\\n\").replace(\"<b>\", \"\").replace(\"</b>\", \"\")\n )\n return text\n\n\ncitation_text = (\n 'napari contributors (2019). napari: a '\n 'multi-dimensional image viewer for python. '\n 'doi:10.5281/zenodo.3555620'\n)\n", "path": "napari/utils/info.py"}]}
num_tokens_prompt: 2,320
num_tokens_diff: 409
problem_id: gh_patches_debug_29116
source: rasdani/github-patches
task_type: git_diff
in_source_id: numba__numba-6290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cmath linking issues in CUDA From the mailing list: https://groups.google.com/a/continuum.io/forum/#!topic/numba-users/oBcaFcsD-x4 This: ```python from numba import cuda import cmath import numpy as np @cuda.jit(device = True) def quarticRoots(a, b, c, d, e): del_0 = c**2 - 3.*b*d + 12.*a*e del_1 = 2.*c**3 - 9.*b*c*d + 27*e*b**2 + 27*a*d**2 - 72*a*c*e p = (8.*a*c - 3.*b**2)/(8.*a**2) q = (b**3 - 4.*a*b*c + 8.*d*a**2)/(8.*a**3) Q = ((del_1 + cmath.sqrt(del_1**2 - 4.*del_0**3))/2.)**(1./3.)#**(1./3.) is the problem. S = 0.5*cmath.sqrt(-2./3.*p + 1/(3.*a)*(Q + del_0/Q)) x1 = -b/(4.*a) - S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S) x2 = -b/(4.*a) - S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S) x3 = -b/(4.*a) + S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S) x4 = -b/(4.*a) + S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S) return (x1, x2, x3, x4) @cuda.jit def launcher(d_array): output = quarticRoots(2.,4.,2.,1.,1.) d_array[0] = output[0] d_array[1] = output[1] d_array[2] = output[2] d_array[3] = output[3] def main(): d_array = cuda.device_array(4, dtype = np.complex64) launcher[1,1](d_array) output = d_array.copy_to_host() print output main() ``` yields: ```python Traceback (most recent call last): File "<path>numba/numba/cuda/cudadrv/driver.py", line 1432, in complete driver.cuLinkComplete(self.handle, byref(cubin), byref(size)) File "<path>numba/numba/cuda/cudadrv/driver.py", line 288, in safe_cuda_api_call self._check_error(fname, retcode) File "<path>numba/numba/cuda/cudadrv/driver.py", line 323, in _check_error raise CudaAPIError(retcode, msg) numba.cuda.cudadrv.driver.CudaAPIError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN During handling of the above exception, another exception occurred: Traceback (most recent call last): File "issue_13.py", line 39, in <module> main() File "issue_13.py", line 35, in main launcher[1,1](d_array) File "<path>numba/numba/cuda/compiler.py", line 701, in __call__ kernel = self.specialize(*args) File "<path>numba/numba/cuda/compiler.py", line 712, in specialize kernel = self.compile(argtypes) File "<path>numba/numba/cuda/compiler.py", line 730, in compile kernel.bind() File "<path>numba/numba/cuda/compiler.py", line 489, in bind self._func.get() File "<path>numba/numba/cuda/compiler.py", line 377, in get cubin, _size = linker.complete() File "<path>numba/numba/cuda/cudadrv/driver.py", line 1434, in complete raise LinkerError("%s\n%s" % (e, self.error_log)) numba.cuda.cudadrv.driver.LinkerError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN error : Undefined reference to 'numba_cpow' in '<cudapy-ptx>' ``` the `cmath.sqrt()` creates a complex domain result with is then raised to the power `1/3`, this ends up as a call to `numba_cpow` which is fine on the CPU but doesn't have an impl in CUDA, as a result there's a linking error. Two issues: 1. Can the additional cmath function impls for the CPU be done for CUDA. 2. If not can this problem be caught earlier in the complication chain and a better message produced such that is doesn't appear as a link error. 
</issue> <code> [start of numba/cuda/libdevice.py] 1 import math 2 from llvmlite.llvmpy.core import Type 3 from numba.core import types, cgutils 4 from numba.core.imputils import Registry 5 6 registry = Registry() 7 lower = registry.lower 8 9 float_set = types.float32, types.float64 10 11 12 def bool_implement(nvname, ty): 13 def core(context, builder, sig, args): 14 assert sig.return_type == types.boolean, nvname 15 fty = context.get_value_type(ty) 16 lmod = builder.module 17 fnty = Type.function(Type.int(), [fty]) 18 fn = lmod.get_or_insert_function(fnty, name=nvname) 19 result = builder.call(fn, args) 20 return context.cast(builder, result, types.int32, types.boolean) 21 22 return core 23 24 25 def unary_implement(nvname, ty): 26 def core(context, builder, sig, args): 27 fty = context.get_value_type(ty) 28 lmod = builder.module 29 fnty = Type.function(fty, [fty]) 30 fn = lmod.get_or_insert_function(fnty, name=nvname) 31 return builder.call(fn, args) 32 33 return core 34 35 36 def binary_implement(nvname, ty): 37 def core(context, builder, sig, args): 38 fty = context.get_value_type(ty) 39 lmod = builder.module 40 fnty = Type.function(fty, [fty, fty]) 41 fn = lmod.get_or_insert_function(fnty, name=nvname) 42 return builder.call(fn, args) 43 44 return core 45 46 47 def powi_implement(nvname): 48 def core(context, builder, sig, args): 49 [base, pow] = args 50 [basety, powty] = sig.args 51 lmod = builder.module 52 fty = context.get_value_type(basety) 53 ity = context.get_value_type(types.int32) 54 fnty = Type.function(fty, [fty, ity]) 55 fn = lmod.get_or_insert_function(fnty, name=nvname) 56 return builder.call(fn, [base, pow]) 57 58 return core 59 60 61 lower(math.pow, types.float32, types.int32)(powi_implement('__nv_powif')) 62 lower(math.pow, types.float64, types.int32)(powi_implement('__nv_powi')) 63 64 65 def frexp_implement(nvname): 66 def core(context, builder, sig, args): 67 fracty, expty = sig.return_type 68 float_type = context.get_value_type(fracty) 69 int_type = context.get_value_type(expty) 70 fnty = Type.function(float_type, [float_type, Type.pointer(int_type)]) 71 72 fn = builder.module.get_or_insert_function(fnty, name=nvname) 73 expptr = cgutils.alloca_once(builder, int_type, name='exp') 74 75 ret = builder.call(fn, (args[0], expptr)) 76 return cgutils.pack_struct(builder, (ret, builder.load(expptr))) 77 78 return core 79 80 81 lower(math.frexp, types.float32)(frexp_implement('__nv_frexpf')) 82 lower(math.frexp, types.float64)(frexp_implement('__nv_frexp')) 83 84 lower(math.ldexp, types.float32, types.int32)(powi_implement('__nv_ldexpf')) 85 lower(math.ldexp, types.float64, types.int32)(powi_implement('__nv_ldexp')) 86 87 88 booleans = [] 89 booleans += [('__nv_isnand', '__nv_isnanf', math.isnan)] 90 booleans += [('__nv_isinfd', '__nv_isinff', math.isinf)] 91 booleans += [('__nv_isfinited', '__nv_finitef', math.isfinite)] 92 93 unarys = [] 94 unarys += [('__nv_ceil', '__nv_ceilf', math.ceil)] 95 unarys += [('__nv_floor', '__nv_floorf', math.floor)] 96 unarys += [('__nv_fabs', '__nv_fabsf', math.fabs)] 97 unarys += [('__nv_exp', '__nv_expf', math.exp)] 98 unarys += [('__nv_expm1', '__nv_expm1f', math.expm1)] 99 unarys += [('__nv_erf', '__nv_erff', math.erf)] 100 unarys += [('__nv_erfc', '__nv_erfcf', math.erfc)] 101 unarys += [('__nv_tgamma', '__nv_tgammaf', math.gamma)] 102 unarys += [('__nv_lgamma', '__nv_lgammaf', math.lgamma)] 103 unarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)] 104 unarys += [('__nv_log', '__nv_logf', math.log)] 105 unarys += [('__nv_log10', 
'__nv_log10f', math.log10)] 106 unarys += [('__nv_log1p', '__nv_log1pf', math.log1p)] 107 unarys += [('__nv_acosh', '__nv_acoshf', math.acosh)] 108 unarys += [('__nv_acos', '__nv_acosf', math.acos)] 109 unarys += [('__nv_cos', '__nv_cosf', math.cos)] 110 unarys += [('__nv_cosh', '__nv_coshf', math.cosh)] 111 unarys += [('__nv_asinh', '__nv_asinhf', math.asinh)] 112 unarys += [('__nv_asin', '__nv_asinf', math.asin)] 113 unarys += [('__nv_sin', '__nv_sinf', math.sin)] 114 unarys += [('__nv_sinh', '__nv_sinhf', math.sinh)] 115 unarys += [('__nv_atan', '__nv_atanf', math.atan)] 116 unarys += [('__nv_atanh', '__nv_atanhf', math.atanh)] 117 unarys += [('__nv_tan', '__nv_tanf', math.tan)] 118 unarys += [('__nv_tanh', '__nv_tanhf', math.tanh)] 119 120 binarys = [] 121 binarys += [('__nv_copysign', '__nv_copysignf', math.copysign)] 122 binarys += [('__nv_atan2', '__nv_atan2f', math.atan2)] 123 binarys += [('__nv_pow', '__nv_powf', math.pow)] 124 binarys += [('__nv_fmod', '__nv_fmodf', math.fmod)] 125 binarys += [('__nv_hypot', '__nv_hypotf', math.hypot)] 126 127 128 for name64, name32, key in booleans: 129 impl64 = bool_implement(name64, types.float64) 130 lower(key, types.float64)(impl64) 131 impl32 = bool_implement(name32, types.float32) 132 lower(key, types.float32)(impl32) 133 134 135 for name64, name32, key in unarys: 136 impl64 = unary_implement(name64, types.float64) 137 lower(key, types.float64)(impl64) 138 impl32 = unary_implement(name32, types.float32) 139 lower(key, types.float32)(impl32) 140 141 for name64, name32, key in binarys: 142 impl64 = binary_implement(name64, types.float64) 143 lower(key, types.float64, types.float64)(impl64) 144 impl32 = binary_implement(name32, types.float32) 145 lower(key, types.float32, types.float32)(impl32) 146 147 148 def modf_implement(nvname, ty): 149 def core(context, builder, sig, args): 150 arg, = args 151 argty, = sig.args 152 fty = context.get_value_type(argty) 153 lmod = builder.module 154 ptr = cgutils.alloca_once(builder, fty) 155 fnty = Type.function(fty, [fty, fty.as_pointer()]) 156 fn = lmod.get_or_insert_function(fnty, name=nvname) 157 out = builder.call(fn, [arg, ptr]) 158 ret = context.make_tuple(builder, types.UniTuple(argty, 2), 159 [out, builder.load(ptr)]) 160 return ret 161 return core 162 163 164 for (ty, intrin) in ((types.float64, '__nv_modf',), 165 (types.float32, '__nv_modff',)): 166 lower(math.modf, ty)(modf_implement(intrin, ty)) 167 [end of numba/cuda/libdevice.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numba/cuda/libdevice.py b/numba/cuda/libdevice.py --- a/numba/cuda/libdevice.py +++ b/numba/cuda/libdevice.py @@ -1,4 +1,5 @@ import math +import operator from llvmlite.llvmpy.core import Type from numba.core import types, cgutils from numba.core.imputils import Registry @@ -164,3 +165,41 @@ for (ty, intrin) in ((types.float64, '__nv_modf',), (types.float32, '__nv_modff',)): lower(math.modf, ty)(modf_implement(intrin, ty)) + + +# Complex power implementations - translations of _Py_c_pow from CPython +# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151 +# +# The complex64 variant casts all constants and some variables to ensure that +# as much computation is done in single precision as possible. A small number +# of operations are still done in 64-bit, but these come from libdevice code. + +def cpow_implement(fty, cty): + def core(context, builder, sig, args): + def cpow_internal(a, b): + + if b.real == fty(0.0) and b.imag == fty(0.0): + return cty(1.0) + cty(0.0j) + elif a.real == fty(0.0) and b.real == fty(0.0): + return cty(0.0) + cty(0.0j) + + vabs = math.hypot(a.real, a.imag) + len = math.pow(vabs, b.real) + at = math.atan2(a.imag, a.real) + phase = at * b.real + if b.imag != fty(0.0): + len /= math.exp(at * b.imag) + phase += b.imag * math.log(vabs) + + return len * (cty(math.cos(phase)) + + cty(math.sin(phase) * cty(1.0j))) + + return context.compile_internal(builder, cpow_internal, sig, args) + + lower(operator.pow, cty, cty)(core) + lower(operator.ipow, cty, cty)(core) + lower(pow, cty, cty)(core) + + +cpow_implement(types.float32, types.complex64) +cpow_implement(types.float64, types.complex128)
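The patch is essentially a translation of CPython's `_Py_c_pow` into something Numba can lower for the GPU. The same polar-form computation, written as plain Python, may be easier to follow; this restatement is illustrative only (it ignores domain errors such as `0 ** -1`) and is not the lowered CUDA code.

```python
import math


def cpow(a: complex, b: complex) -> complex:
    """Complex power via polar form, mirroring the formula in the patch above."""
    if b == 0:
        return 1.0 + 0.0j
    if a == 0:
        return 0.0 + 0.0j
    vabs = math.hypot(a.real, a.imag)    # |a|
    length = vabs ** b.real
    at = math.atan2(a.imag, a.real)      # arg(a)
    phase = at * b.real
    if b.imag != 0.0:
        length /= math.exp(at * b.imag)
        phase += b.imag * math.log(vabs)
    return complex(length * math.cos(phase), length * math.sin(phase))


# Sanity checks against CPython's own complex power.
assert abs(cpow(1 + 1j, complex(1 / 3)) - (1 + 1j) ** (1 / 3)) < 1e-12
assert abs(cpow(2 + 3j, 0.5 + 0.25j) - (2 + 3j) ** (0.5 + 0.25j)) < 1e-12
```

The `complex64` variant in the patch additionally casts constants and intermediates to `float32` so that, as its comment notes, as much of the arithmetic as possible stays in single precision.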
{"golden_diff": "diff --git a/numba/cuda/libdevice.py b/numba/cuda/libdevice.py\n--- a/numba/cuda/libdevice.py\n+++ b/numba/cuda/libdevice.py\n@@ -1,4 +1,5 @@\n import math\n+import operator\n from llvmlite.llvmpy.core import Type\n from numba.core import types, cgutils\n from numba.core.imputils import Registry\n@@ -164,3 +165,41 @@\n for (ty, intrin) in ((types.float64, '__nv_modf',),\n (types.float32, '__nv_modff',)):\n lower(math.modf, ty)(modf_implement(intrin, ty))\n+\n+\n+# Complex power implementations - translations of _Py_c_pow from CPython\n+# https://github.com/python/cpython/blob/a755410e054e1e2390de5830befc08fe80706c66/Objects/complexobject.c#L123-L151\n+#\n+# The complex64 variant casts all constants and some variables to ensure that\n+# as much computation is done in single precision as possible. A small number\n+# of operations are still done in 64-bit, but these come from libdevice code.\n+\n+def cpow_implement(fty, cty):\n+ def core(context, builder, sig, args):\n+ def cpow_internal(a, b):\n+\n+ if b.real == fty(0.0) and b.imag == fty(0.0):\n+ return cty(1.0) + cty(0.0j)\n+ elif a.real == fty(0.0) and b.real == fty(0.0):\n+ return cty(0.0) + cty(0.0j)\n+\n+ vabs = math.hypot(a.real, a.imag)\n+ len = math.pow(vabs, b.real)\n+ at = math.atan2(a.imag, a.real)\n+ phase = at * b.real\n+ if b.imag != fty(0.0):\n+ len /= math.exp(at * b.imag)\n+ phase += b.imag * math.log(vabs)\n+\n+ return len * (cty(math.cos(phase)) +\n+ cty(math.sin(phase) * cty(1.0j)))\n+\n+ return context.compile_internal(builder, cpow_internal, sig, args)\n+\n+ lower(operator.pow, cty, cty)(core)\n+ lower(operator.ipow, cty, cty)(core)\n+ lower(pow, cty, cty)(core)\n+\n+\n+cpow_implement(types.float32, types.complex64)\n+cpow_implement(types.float64, types.complex128)\n", "issue": "cmath linking issues in CUDA\nFrom the mailing list:\r\nhttps://groups.google.com/a/continuum.io/forum/#!topic/numba-users/oBcaFcsD-x4\r\nThis:\r\n```python\r\nfrom numba import cuda\r\nimport cmath\r\nimport numpy as np\r\n\r\[email protected](device = True)\r\ndef quarticRoots(a, b, c, d, e):\r\n\r\n\tdel_0 = c**2 - 3.*b*d + 12.*a*e\r\n\tdel_1 = 2.*c**3 - 9.*b*c*d + 27*e*b**2 + 27*a*d**2 - 72*a*c*e\r\n\r\n\tp = (8.*a*c - 3.*b**2)/(8.*a**2)\r\n\tq = (b**3 - 4.*a*b*c + 8.*d*a**2)/(8.*a**3)\r\n\r\n\tQ = ((del_1 + cmath.sqrt(del_1**2 - 4.*del_0**3))/2.)**(1./3.)#**(1./3.) 
is the problem.\r\n\tS = 0.5*cmath.sqrt(-2./3.*p + 1/(3.*a)*(Q + del_0/Q))\r\n\r\n\tx1 = -b/(4.*a) - S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S)\r\n\tx2 = -b/(4.*a) - S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p + q/S)\r\n\tx3 = -b/(4.*a) + S + 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S)\r\n\tx4 = -b/(4.*a) + S - 0.5*cmath.sqrt(-4.*S**2 - 2.*p - q/S)\r\n\r\n\treturn (x1, x2, x3, x4)\r\n\r\[email protected]\r\ndef launcher(d_array):\r\n\toutput = quarticRoots(2.,4.,2.,1.,1.)\r\n\r\n\td_array[0] = output[0]\r\n\td_array[1] = output[1]\r\n\td_array[2] = output[2]\r\n\td_array[3] = output[3]\r\n\r\ndef main():\r\n\td_array = cuda.device_array(4, dtype = np.complex64)\r\n\tlauncher[1,1](d_array)\r\n\toutput = d_array.copy_to_host()\r\n\tprint output\r\n\r\nmain()\r\n```\r\nyields:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 1432, in complete\r\n driver.cuLinkComplete(self.handle, byref(cubin), byref(size))\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 288, in safe_cuda_api_call\r\n self._check_error(fname, retcode)\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 323, in _check_error\r\n raise CudaAPIError(retcode, msg)\r\nnumba.cuda.cudadrv.driver.CudaAPIError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"issue_13.py\", line 39, in <module>\r\n main()\r\n File \"issue_13.py\", line 35, in main\r\n launcher[1,1](d_array)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 701, in __call__\r\n kernel = self.specialize(*args)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 712, in specialize\r\n kernel = self.compile(argtypes)\r\n File \"<path>numba/numba/cuda/compiler.py\", line 730, in compile\r\n kernel.bind()\r\n File \"<path>numba/numba/cuda/compiler.py\", line 489, in bind\r\n self._func.get()\r\n File \"<path>numba/numba/cuda/compiler.py\", line 377, in get\r\n cubin, _size = linker.complete()\r\n File \"<path>numba/numba/cuda/cudadrv/driver.py\", line 1434, in complete\r\n raise LinkerError(\"%s\\n%s\" % (e, self.error_log))\r\nnumba.cuda.cudadrv.driver.LinkerError: [999] Call to cuLinkComplete results in CUDA_ERROR_UNKNOWN\r\nerror : Undefined reference to 'numba_cpow' in '<cudapy-ptx>'\r\n```\r\nthe `cmath.sqrt()` creates a complex domain result with is then raised to the power `1/3`, this ends up as a call to `numba_cpow` which is fine on the CPU but doesn't have an impl in CUDA, as a result there's a linking error.\r\n\r\nTwo issues:\r\n1. Can the additional cmath function impls for the CPU be done for CUDA.\r\n2. 
If not can this problem be caught earlier in the complication chain and a better message produced such that is doesn't appear as a link error.\r\n\n", "before_files": [{"content": "import math\nfrom llvmlite.llvmpy.core import Type\nfrom numba.core import types, cgutils\nfrom numba.core.imputils import Registry\n\nregistry = Registry()\nlower = registry.lower\n\nfloat_set = types.float32, types.float64\n\n\ndef bool_implement(nvname, ty):\n def core(context, builder, sig, args):\n assert sig.return_type == types.boolean, nvname\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(Type.int(), [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n result = builder.call(fn, args)\n return context.cast(builder, result, types.int32, types.boolean)\n\n return core\n\n\ndef unary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef binary_implement(nvname, ty):\n def core(context, builder, sig, args):\n fty = context.get_value_type(ty)\n lmod = builder.module\n fnty = Type.function(fty, [fty, fty])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, args)\n\n return core\n\n\ndef powi_implement(nvname):\n def core(context, builder, sig, args):\n [base, pow] = args\n [basety, powty] = sig.args\n lmod = builder.module\n fty = context.get_value_type(basety)\n ity = context.get_value_type(types.int32)\n fnty = Type.function(fty, [fty, ity])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n return builder.call(fn, [base, pow])\n\n return core\n\n\nlower(math.pow, types.float32, types.int32)(powi_implement('__nv_powif'))\nlower(math.pow, types.float64, types.int32)(powi_implement('__nv_powi'))\n\n\ndef frexp_implement(nvname):\n def core(context, builder, sig, args):\n fracty, expty = sig.return_type\n float_type = context.get_value_type(fracty)\n int_type = context.get_value_type(expty)\n fnty = Type.function(float_type, [float_type, Type.pointer(int_type)])\n\n fn = builder.module.get_or_insert_function(fnty, name=nvname)\n expptr = cgutils.alloca_once(builder, int_type, name='exp')\n\n ret = builder.call(fn, (args[0], expptr))\n return cgutils.pack_struct(builder, (ret, builder.load(expptr)))\n\n return core\n\n\nlower(math.frexp, types.float32)(frexp_implement('__nv_frexpf'))\nlower(math.frexp, types.float64)(frexp_implement('__nv_frexp'))\n\nlower(math.ldexp, types.float32, types.int32)(powi_implement('__nv_ldexpf'))\nlower(math.ldexp, types.float64, types.int32)(powi_implement('__nv_ldexp'))\n\n\nbooleans = []\nbooleans += [('__nv_isnand', '__nv_isnanf', math.isnan)]\nbooleans += [('__nv_isinfd', '__nv_isinff', math.isinf)]\nbooleans += [('__nv_isfinited', '__nv_finitef', math.isfinite)]\n\nunarys = []\nunarys += [('__nv_ceil', '__nv_ceilf', math.ceil)]\nunarys += [('__nv_floor', '__nv_floorf', math.floor)]\nunarys += [('__nv_fabs', '__nv_fabsf', math.fabs)]\nunarys += [('__nv_exp', '__nv_expf', math.exp)]\nunarys += [('__nv_expm1', '__nv_expm1f', math.expm1)]\nunarys += [('__nv_erf', '__nv_erff', math.erf)]\nunarys += [('__nv_erfc', '__nv_erfcf', math.erfc)]\nunarys += [('__nv_tgamma', '__nv_tgammaf', math.gamma)]\nunarys += [('__nv_lgamma', '__nv_lgammaf', math.lgamma)]\nunarys += [('__nv_sqrt', '__nv_sqrtf', math.sqrt)]\nunarys += [('__nv_log', '__nv_logf', math.log)]\nunarys += [('__nv_log10', '__nv_log10f', 
math.log10)]\nunarys += [('__nv_log1p', '__nv_log1pf', math.log1p)]\nunarys += [('__nv_acosh', '__nv_acoshf', math.acosh)]\nunarys += [('__nv_acos', '__nv_acosf', math.acos)]\nunarys += [('__nv_cos', '__nv_cosf', math.cos)]\nunarys += [('__nv_cosh', '__nv_coshf', math.cosh)]\nunarys += [('__nv_asinh', '__nv_asinhf', math.asinh)]\nunarys += [('__nv_asin', '__nv_asinf', math.asin)]\nunarys += [('__nv_sin', '__nv_sinf', math.sin)]\nunarys += [('__nv_sinh', '__nv_sinhf', math.sinh)]\nunarys += [('__nv_atan', '__nv_atanf', math.atan)]\nunarys += [('__nv_atanh', '__nv_atanhf', math.atanh)]\nunarys += [('__nv_tan', '__nv_tanf', math.tan)]\nunarys += [('__nv_tanh', '__nv_tanhf', math.tanh)]\n\nbinarys = []\nbinarys += [('__nv_copysign', '__nv_copysignf', math.copysign)]\nbinarys += [('__nv_atan2', '__nv_atan2f', math.atan2)]\nbinarys += [('__nv_pow', '__nv_powf', math.pow)]\nbinarys += [('__nv_fmod', '__nv_fmodf', math.fmod)]\nbinarys += [('__nv_hypot', '__nv_hypotf', math.hypot)]\n\n\nfor name64, name32, key in booleans:\n impl64 = bool_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = bool_implement(name32, types.float32)\n lower(key, types.float32)(impl32)\n\n\nfor name64, name32, key in unarys:\n impl64 = unary_implement(name64, types.float64)\n lower(key, types.float64)(impl64)\n impl32 = unary_implement(name32, types.float32)\n lower(key, types.float32)(impl32)\n\nfor name64, name32, key in binarys:\n impl64 = binary_implement(name64, types.float64)\n lower(key, types.float64, types.float64)(impl64)\n impl32 = binary_implement(name32, types.float32)\n lower(key, types.float32, types.float32)(impl32)\n\n\ndef modf_implement(nvname, ty):\n def core(context, builder, sig, args):\n arg, = args\n argty, = sig.args\n fty = context.get_value_type(argty)\n lmod = builder.module\n ptr = cgutils.alloca_once(builder, fty)\n fnty = Type.function(fty, [fty, fty.as_pointer()])\n fn = lmod.get_or_insert_function(fnty, name=nvname)\n out = builder.call(fn, [arg, ptr])\n ret = context.make_tuple(builder, types.UniTuple(argty, 2),\n [out, builder.load(ptr)])\n return ret\n return core\n\n\nfor (ty, intrin) in ((types.float64, '__nv_modf',),\n (types.float32, '__nv_modff',)):\n lower(math.modf, ty)(modf_implement(intrin, ty))\n", "path": "numba/cuda/libdevice.py"}]}
num_tokens_prompt: 3,900
num_tokens_diff: 626
problem_id: gh_patches_debug_14393
source: rasdani/github-patches
task_type: git_diff
in_source_id: falconry__falcon-993
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Default OPTIONS responder does not set Content-Length to "0" Per RFC 7231: > A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response. </issue> <code> [start of falcon/responders.py] 1 # Copyright 2013 by Rackspace Hosting, Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Default responder implementations.""" 16 17 from falcon.errors import HTTPBadRequest 18 from falcon.errors import HTTPMethodNotAllowed 19 from falcon.errors import HTTPNotFound 20 from falcon.status_codes import HTTP_204 21 22 23 def path_not_found(req, resp, **kwargs): 24 """Raise 404 HTTPNotFound error""" 25 raise HTTPNotFound() 26 27 28 def bad_request(req, resp, **kwargs): 29 """Raise 400 HTTPBadRequest error""" 30 raise HTTPBadRequest('Bad request', 'Invalid HTTP method') 31 32 33 def create_method_not_allowed(allowed_methods): 34 """Creates a responder for "405 Method Not Allowed" 35 36 Args: 37 allowed_methods: A list of HTTP methods (uppercase) that should be 38 returned in the Allow header. 39 40 """ 41 def method_not_allowed(req, resp, **kwargs): 42 """Raise 405 HTTPMethodNotAllowed error""" 43 raise HTTPMethodNotAllowed(allowed_methods) 44 45 return method_not_allowed 46 47 48 def create_default_options(allowed_methods): 49 """Creates a default responder for the OPTIONS method 50 51 Args: 52 allowed_methods: A list of HTTP methods (uppercase) that should be 53 returned in the Allow header. 54 55 """ 56 allowed = ', '.join(allowed_methods) 57 58 def on_options(req, resp, **kwargs): 59 resp.status = HTTP_204 60 resp.set_header('Allow', allowed) 61 resp.set_header('Content-Length', '0') 62 63 return on_options 64 [end of falcon/responders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/falcon/responders.py b/falcon/responders.py --- a/falcon/responders.py +++ b/falcon/responders.py @@ -17,7 +17,7 @@ from falcon.errors import HTTPBadRequest from falcon.errors import HTTPMethodNotAllowed from falcon.errors import HTTPNotFound -from falcon.status_codes import HTTP_204 +from falcon.status_codes import HTTP_200 def path_not_found(req, resp, **kwargs): @@ -56,7 +56,7 @@ allowed = ', '.join(allowed_methods) def on_options(req, resp, **kwargs): - resp.status = HTTP_204 + resp.status = HTTP_200 resp.set_header('Allow', allowed) resp.set_header('Content-Length', '0')
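Note the status change that carries this fix: a 204 (No Content) response must not include a `Content-Length` header at all (RFC 7230 §3.3.2), so the default responder moves to 200 and keeps an explicit zero, which satisfies the RFC 7231 requirement quoted in the issue. Restated as a standalone sketch mirroring the patched function:

```python
from falcon.status_codes import HTTP_200


def create_default_options(allowed_methods):
    """Default OPTIONS responder, as patched above."""
    allowed = ', '.join(allowed_methods)

    def on_options(req, resp, **kwargs):
        # 200 rather than 204: a 204 response may not carry Content-Length,
        # while a bodiless response is expected to advertise a length of "0".
        resp.status = HTTP_200
        resp.set_header('Allow', allowed)
        resp.set_header('Content-Length', '0')

    return on_options
```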
{"golden_diff": "diff --git a/falcon/responders.py b/falcon/responders.py\n--- a/falcon/responders.py\n+++ b/falcon/responders.py\n@@ -17,7 +17,7 @@\n from falcon.errors import HTTPBadRequest\n from falcon.errors import HTTPMethodNotAllowed\n from falcon.errors import HTTPNotFound\n-from falcon.status_codes import HTTP_204\n+from falcon.status_codes import HTTP_200\n \n \n def path_not_found(req, resp, **kwargs):\n@@ -56,7 +56,7 @@\n allowed = ', '.join(allowed_methods)\n \n def on_options(req, resp, **kwargs):\n- resp.status = HTTP_204\n+ resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n", "issue": "Default OPTIONS responder does not set Content-Length to \"0\"\nPer RFC 7231:\n\n> A server MUST generate a Content-Length field with a value of \"0\" if no payload body is to be sent in the response.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder implementations.\"\"\"\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_204\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}]}
num_tokens_prompt: 1,150
num_tokens_diff: 182
problem_id: gh_patches_debug_7878
source: rasdani/github-patches
task_type: git_diff
in_source_id: jupyterhub__zero-to-jupyterhub-k8s-8
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Jupyter structure for docs - [x] Add readthedocs.yml - [x] move environment.yml inside source directory - [ ] add project jupyter copyright </issue> <code> [start of doc/source/conf.py] 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Learning with JupyterHub documentation build configuration file, created by 5 # sphinx-quickstart on Fri Mar 17 16:07:58 2017. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 # 20 # import os 21 # import sys 22 # sys.path.insert(0, os.path.abspath('.')) 23 import recommonmark 24 25 # -- General configuration ------------------------------------------------ 26 27 # If your documentation needs a minimal Sphinx version, state it here. 28 # 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = ['sphinx.ext.mathjax'] 35 36 # Add any paths that contain templates here, relative to this directory. 37 templates_path = ['_templates'] 38 39 source_parsers = { 40 '.md': 'recommonmark.parser.CommonMarkParser', 41 } 42 43 # The suffix(es) of source filenames. 44 # You can specify multiple suffix as a list of string: 45 # 46 # source_suffix = ['.rst', '.md'] 47 source_suffix = ['.rst', '.md'] 48 49 # The master toctree document. 50 master_doc = 'index' 51 52 # General information about the project. 53 project = 'Zero to Jupyterhub' 54 copyright = '2017, Chris Holdgraf' 55 author = 'Chris Holdgraf' 56 57 # The version info for the project you're documenting, acts as replacement for 58 # |version| and |release|, also used in various other places throughout the 59 # built documents. 60 # 61 # The short X.Y version. 62 version = '0.1' 63 # The full version, including alpha/beta/rc tags. 64 release = '0.1' 65 66 # The language for content autogenerated by Sphinx. Refer to documentation 67 # for a list of supported languages. 68 # 69 # This is also used if you do content translation via gettext catalogs. 70 # Usually you set "language" from the command line for these cases. 71 language = None 72 73 # List of patterns, relative to source directory, that match files and 74 # directories to ignore when looking for source files. 75 # This patterns also effect to html_static_path and html_extra_path 76 exclude_patterns = [] 77 78 # The name of the Pygments (syntax highlighting) style to use. 79 pygments_style = 'sphinx' 80 81 # If true, `todo` and `todoList` produce output, else they produce nothing. 82 todo_include_todos = False 83 84 85 # -- Options for HTML output ---------------------------------------------- 86 87 # The theme to use for HTML and HTML Help pages. See the documentation for 88 # a list of builtin themes. 
89 # 90 html_theme = 'alabaster' 91 html_favicon = '_static/images/logo/favicon.ico' 92 html_logo = '_static/images/logo/logo.png' 93 94 # Theme options are theme-specific and customize the look and feel of a theme 95 # further. For a list of options available for each theme, see the 96 # documentation. 97 # 98 # html_theme_options = {} 99 100 # Add any paths that contain custom static files (such as style sheets) here, 101 # relative to this directory. They are copied after the builtin static files, 102 # so a file named "default.css" will overwrite the builtin "default.css". 103 html_static_path = ['_static'] 104 105 106 # -- Options for HTMLHelp output ------------------------------------------ 107 108 # Output file base name for HTML help builder. 109 htmlhelp_basename = 'ZeroToJupyterhubDoc' 110 111 112 # -- Options for LaTeX output --------------------------------------------- 113 114 latex_elements = { 115 # The paper size ('letterpaper' or 'a4paper'). 116 # 117 # 'papersize': 'letterpaper', 118 119 # The font size ('10pt', '11pt' or '12pt'). 120 # 121 # 'pointsize': '10pt', 122 123 # Additional stuff for the LaTeX preamble. 124 # 125 # 'preamble': '', 126 127 # Latex figure (float) alignment 128 # 129 # 'figure_align': 'htbp', 130 } 131 132 # Grouping the document tree into LaTeX files. List of tuples 133 # (source start file, target name, title, 134 # author, documentclass [howto, manual, or own class]). 135 latex_documents = [ 136 (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub', 137 'Chris Holdgraf', 'manual'), 138 ] 139 140 141 # -- Options for manual page output --------------------------------------- 142 143 # One entry per manual page. List of tuples 144 # (source start file, name, description, authors, manual section). 145 man_pages = [ 146 (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub', 147 [author], 1) 148 ] 149 150 151 # -- Options for Texinfo output ------------------------------------------- 152 153 # Grouping the document tree into Texinfo files. List of tuples 154 # (source start file, target name, title, author, 155 # dir menu entry, description, category) 156 texinfo_documents = [ 157 (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub', 158 author, 'ZeroToJupyterhubDoc', 'One line description of project.', 159 'Miscellaneous'), 160 ] 161 162 163 164 # -- Options for Epub output ---------------------------------------------- 165 166 # Bibliographic Dublin Core info. 167 epub_title = project 168 epub_author = author 169 epub_publisher = author 170 epub_copyright = copyright 171 172 # The unique identifier of the text. This can be a ISBN number 173 # or the project homepage. 174 # 175 # epub_identifier = '' 176 177 # A unique identification for the text. 178 # 179 # epub_uid = '' 180 181 # A list of files that should not be packed into the epub file. 182 epub_exclude_files = ['search.html'] 183 184 185 [end of doc/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -50,9 +50,9 @@ master_doc = 'index' # General information about the project. -project = 'Zero to Jupyterhub' -copyright = '2017, Chris Holdgraf' -author = 'Chris Holdgraf' +project = u'Zero to JupyterHub with Kubernetes' +copyright = u'2016, Project Jupyter team' +author = u'Project Jupyter team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -50,9 +50,9 @@\n master_doc = 'index'\n \n # General information about the project.\n-project = 'Zero to Jupyterhub'\n-copyright = '2017, Chris Holdgraf'\n-author = 'Chris Holdgraf'\n+project = u'Zero to JupyterHub with Kubernetes'\n+copyright = u'2016, Project Jupyter team'\n+author = u'Project Jupyter team'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "Add Jupyter structure for docs\n- [x] Add readthedocs.yml\r\n- [x] move environment.yml inside source directory\r\n- [ ] add project jupyter copyright\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Learning with JupyterHub documentation build configuration file, created by\n# sphinx-quickstart on Fri Mar 17 16:07:58 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport recommonmark\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Zero to Jupyterhub'\ncopyright = '2017, Chris Holdgraf'\nauthor = 'Chris Holdgraf'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub',\n 'Chris Holdgraf', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub',\n author, 'ZeroToJupyterhubDoc', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n", "path": "doc/source/conf.py"}]}
2,331
156
gh_patches_debug_27996
rasdani/github-patches
git_diff
goauthentik__authentik-6031
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add UnwillingToPerformError to ldap/password.py:95 **Is your feature request related to a problem? Please describe.** Authentik tries to modify the password the AD way, which uses a `modify` operation with the `unicodePwd` field, and then if it gets an `AttributeError` it tries the extended operation. However, [LLDAP](https://github.com/lldap/lldap) doesn't return an `AttributeError` but an `UnwillingToPerformError` since LLDAP doesn't support the modify operation at all, so it completely fails. **Describe the solution you'd like** Add an `UnwillingToPerformError` to the `except` on [`ldap/password.py:95`](https://github.com/goauthentik/authentik/blob/main/authentik/sources/ldap/password.py#L95) **Describe alternatives you've considered** There's no alternative. **Additional context** • [LLDAP log](https://cdn.discordapp.com/attachments/1108339414494613514/1108870676980449300/dockerlogs.txt) <details> <summary>Authentik Event Log</summary> <img width="1604" alt="image" src="https://github.com/goauthentik/authentik/assets/2737082/047b64fa-468f-4a91-b5e7-4eecd27768b6"> </details> This is a follow-up on https://github.com/goauthentik/authentik/issues/5652. </issue> <code> [start of authentik/sources/ldap/password.py] 1 """Help validate and update passwords in LDAP""" 2 from enum import IntFlag 3 from re import split 4 from typing import Optional 5 6 from ldap3 import BASE 7 from ldap3.core.exceptions import LDAPAttributeError 8 from structlog.stdlib import get_logger 9 10 from authentik.core.models import User 11 from authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME 12 from authentik.sources.ldap.models import LDAPSource 13 14 LOGGER = get_logger() 15 16 NON_ALPHA = r"~!@#$%^&*_-+=`|\(){}[]:;\"'<>,.?/" 17 RE_DISPLAYNAME_SEPARATORS = r",\.–—_\s#\t" 18 19 20 class PwdProperties(IntFlag): 21 """Possible values for the pwdProperties attribute""" 22 23 DOMAIN_PASSWORD_COMPLEX = 1 24 DOMAIN_PASSWORD_NO_ANON_CHANGE = 2 25 DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4 26 DOMAIN_LOCKOUT_ADMINS = 8 27 DOMAIN_PASSWORD_STORE_CLEARTEXT = 16 28 DOMAIN_REFUSE_PASSWORD_CHANGE = 32 29 30 31 class PasswordCategories(IntFlag): 32 """Password categories as defined by Microsoft, a category can only be counted 33 once, hence intflag.""" 34 35 NONE = 0 36 ALPHA_LOWER = 1 37 ALPHA_UPPER = 2 38 ALPHA_OTHER = 4 39 NUMERIC = 8 40 SYMBOL = 16 41 42 43 class LDAPPasswordChanger: 44 """Help validate and update passwords in LDAP""" 45 46 _source: LDAPSource 47 48 def __init__(self, source: LDAPSource) -> None: 49 self._source = source 50 self._connection = source.connection() 51 52 def get_domain_root_dn(self) -> str: 53 """Attempt to get root DN via MS specific fields or generic LDAP fields""" 54 info = self._connection.server.info 55 if "rootDomainNamingContext" in info.other: 56 return info.other["rootDomainNamingContext"][0] 57 naming_contexts = info.naming_contexts 58 naming_contexts.sort(key=len) 59 return naming_contexts[0] 60 61 def check_ad_password_complexity_enabled(self) -> bool: 62 """Check if DOMAIN_PASSWORD_COMPLEX is enabled""" 63 root_dn = self.get_domain_root_dn() 64 try: 65 root_attrs = self._connection.extend.standard.paged_search( 66 search_base=root_dn, 67 search_filter="(objectClass=*)", 68 search_scope=BASE, 69 attributes=["pwdProperties"], 70 ) 71 root_attrs = list(root_attrs)[0] 72 except (LDAPAttributeError, KeyError, IndexError): 73 return False 74 raw_pwd_properties = 
root_attrs.get("attributes", {}).get("pwdProperties", None) 75 if not raw_pwd_properties: 76 return False 77 78 try: 79 pwd_properties = PwdProperties(raw_pwd_properties) 80 except ValueError: 81 return False 82 if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties: 83 return True 84 85 return False 86 87 def change_password(self, user: User, password: str): 88 """Change user's password""" 89 user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None) 90 if not user_dn: 91 LOGGER.info(f"User has no {LDAP_DISTINGUISHED_NAME} set.") 92 return 93 try: 94 self._connection.extend.microsoft.modify_password(user_dn, password) 95 except LDAPAttributeError: 96 self._connection.extend.standard.modify_password(user_dn, new_password=password) 97 98 def _ad_check_password_existing(self, password: str, user_dn: str) -> bool: 99 """Check if a password contains sAMAccount or displayName""" 100 users = list( 101 self._connection.extend.standard.paged_search( 102 search_base=user_dn, 103 search_filter=self._source.user_object_filter, 104 search_scope=BASE, 105 attributes=["displayName", "sAMAccountName"], 106 ) 107 ) 108 if len(users) != 1: 109 raise AssertionError() 110 user_attributes = users[0]["attributes"] 111 # If sAMAccountName is longer than 3 chars, check if its contained in password 112 if len(user_attributes["sAMAccountName"]) >= 3: 113 if password.lower() in user_attributes["sAMAccountName"].lower(): 114 return False 115 # No display name set, can't check any further 116 if len(user_attributes["displayName"]) < 1: 117 return True 118 for display_name in user_attributes["displayName"]: 119 display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name) 120 for token in display_name_tokens: 121 # Ignore tokens under 3 chars 122 if len(token) < 3: 123 continue 124 if token.lower() in password.lower(): 125 return False 126 return True 127 128 def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool: 129 """Check if password matches Active directory password policies 130 131 https://docs.microsoft.com/en-us/windows/security/threat-protection/ 132 security-policy-settings/password-must-meet-complexity-requirements 133 """ 134 if user: 135 # Check if password contains sAMAccountName or displayNames 136 if LDAP_DISTINGUISHED_NAME in user.attributes: 137 existing_user_check = self._ad_check_password_existing( 138 password, user.attributes.get(LDAP_DISTINGUISHED_NAME) 139 ) 140 if not existing_user_check: 141 LOGGER.debug("Password failed name check", user=user) 142 return existing_user_check 143 144 # Step 2, match at least 3 of 5 categories 145 matched_categories = PasswordCategories.NONE 146 required = 3 147 for letter in password: 148 # Only match one category per letter, 149 if letter.islower(): 150 matched_categories |= PasswordCategories.ALPHA_LOWER 151 elif letter.isupper(): 152 matched_categories |= PasswordCategories.ALPHA_UPPER 153 elif not letter.isascii() and letter.isalpha(): 154 # Not exactly matching microsoft's policy, but count it as "Other unicode" char 155 # when its alpha and not ascii 156 matched_categories |= PasswordCategories.ALPHA_OTHER 157 elif letter.isnumeric(): 158 matched_categories |= PasswordCategories.NUMERIC 159 elif letter in NON_ALPHA: 160 matched_categories |= PasswordCategories.SYMBOL 161 if bin(matched_categories).count("1") < required: 162 LOGGER.debug( 163 "Password didn't match enough categories", 164 has=matched_categories, 165 must=required, 166 ) 167 return False 168 LOGGER.debug("Password matched categories", 
has=matched_categories, must=required) 169 return True 170 [end of authentik/sources/ldap/password.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py --- a/authentik/sources/ldap/password.py +++ b/authentik/sources/ldap/password.py @@ -4,7 +4,7 @@ from typing import Optional from ldap3 import BASE -from ldap3.core.exceptions import LDAPAttributeError +from ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult from structlog.stdlib import get_logger from authentik.core.models import User @@ -69,7 +69,7 @@ attributes=["pwdProperties"], ) root_attrs = list(root_attrs)[0] - except (LDAPAttributeError, KeyError, IndexError): + except (LDAPAttributeError, LDAPUnwillingToPerformResult, KeyError, IndexError): return False raw_pwd_properties = root_attrs.get("attributes", {}).get("pwdProperties", None) if not raw_pwd_properties: @@ -92,7 +92,7 @@ return try: self._connection.extend.microsoft.modify_password(user_dn, password) - except LDAPAttributeError: + except (LDAPAttributeError, LDAPUnwillingToPerformResult): self._connection.extend.standard.modify_password(user_dn, new_password=password) def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:
{"golden_diff": "diff --git a/authentik/sources/ldap/password.py b/authentik/sources/ldap/password.py\n--- a/authentik/sources/ldap/password.py\n+++ b/authentik/sources/ldap/password.py\n@@ -4,7 +4,7 @@\n from typing import Optional\n \n from ldap3 import BASE\n-from ldap3.core.exceptions import LDAPAttributeError\n+from ldap3.core.exceptions import LDAPAttributeError, LDAPUnwillingToPerformResult\n from structlog.stdlib import get_logger\n \n from authentik.core.models import User\n@@ -69,7 +69,7 @@\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n- except (LDAPAttributeError, KeyError, IndexError):\n+ except (LDAPAttributeError, LDAPUnwillingToPerformResult, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n@@ -92,7 +92,7 @@\n return\n try:\n self._connection.extend.microsoft.modify_password(user_dn, password)\n- except LDAPAttributeError:\n+ except (LDAPAttributeError, LDAPUnwillingToPerformResult):\n self._connection.extend.standard.modify_password(user_dn, new_password=password)\n \n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n", "issue": "Add UnwillingToPerformError to ldap/password.py:95\n**Is your feature request related to a problem? Please describe.**\r\nAuthentik tries to modify the password the AD way, which uses a `modify` operation with the `unicodePwd` field, and then if it gets an `AttributeError` it tries the extended operation. However, [LLDAP](https://github.com/lldap/lldap) doesn't return an `AttributeError` but an `UnwillingToPerformError` since LLDAP doesn't support the modify operation at all, so it completely fails.\r\n\r\n**Describe the solution you'd like**\r\nAdd an `UnwillingToPerformError` to the `except` on [`ldap/password.py:95`](https://github.com/goauthentik/authentik/blob/main/authentik/sources/ldap/password.py#L95)\r\n\r\n**Describe alternatives you've considered**\r\nThere's no alternative.\r\n\r\n**Additional context**\r\n\u2022 [LLDAP log](https://cdn.discordapp.com/attachments/1108339414494613514/1108870676980449300/dockerlogs.txt)\r\n<details>\r\n<summary>Authentik Event Log</summary>\r\n<img width=\"1604\" alt=\"image\" src=\"https://github.com/goauthentik/authentik/assets/2737082/047b64fa-468f-4a91-b5e7-4eecd27768b6\">\r\n</details>\r\n\r\n\r\nThis is a follow-up on https://github.com/goauthentik/authentik/issues/5652.\r\n\n", "before_files": [{"content": "\"\"\"Help validate and update passwords in LDAP\"\"\"\nfrom enum import IntFlag\nfrom re import split\nfrom typing import Optional\n\nfrom ldap3 import BASE\nfrom ldap3.core.exceptions import LDAPAttributeError\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.auth import LDAP_DISTINGUISHED_NAME\nfrom authentik.sources.ldap.models import LDAPSource\n\nLOGGER = get_logger()\n\nNON_ALPHA = r\"~!@#$%^&*_-+=`|\\(){}[]:;\\\"'<>,.?/\"\nRE_DISPLAYNAME_SEPARATORS = r\",\\.\u2013\u2014_\\s#\\t\"\n\n\nclass PwdProperties(IntFlag):\n \"\"\"Possible values for the pwdProperties attribute\"\"\"\n\n DOMAIN_PASSWORD_COMPLEX = 1\n DOMAIN_PASSWORD_NO_ANON_CHANGE = 2\n DOMAIN_PASSWORD_NO_CLEAR_CHANGE = 4\n DOMAIN_LOCKOUT_ADMINS = 8\n DOMAIN_PASSWORD_STORE_CLEARTEXT = 16\n DOMAIN_REFUSE_PASSWORD_CHANGE = 32\n\n\nclass PasswordCategories(IntFlag):\n \"\"\"Password categories as defined by Microsoft, a category can only be counted\n once, hence intflag.\"\"\"\n\n NONE = 0\n ALPHA_LOWER = 1\n 
ALPHA_UPPER = 2\n ALPHA_OTHER = 4\n NUMERIC = 8\n SYMBOL = 16\n\n\nclass LDAPPasswordChanger:\n \"\"\"Help validate and update passwords in LDAP\"\"\"\n\n _source: LDAPSource\n\n def __init__(self, source: LDAPSource) -> None:\n self._source = source\n self._connection = source.connection()\n\n def get_domain_root_dn(self) -> str:\n \"\"\"Attempt to get root DN via MS specific fields or generic LDAP fields\"\"\"\n info = self._connection.server.info\n if \"rootDomainNamingContext\" in info.other:\n return info.other[\"rootDomainNamingContext\"][0]\n naming_contexts = info.naming_contexts\n naming_contexts.sort(key=len)\n return naming_contexts[0]\n\n def check_ad_password_complexity_enabled(self) -> bool:\n \"\"\"Check if DOMAIN_PASSWORD_COMPLEX is enabled\"\"\"\n root_dn = self.get_domain_root_dn()\n try:\n root_attrs = self._connection.extend.standard.paged_search(\n search_base=root_dn,\n search_filter=\"(objectClass=*)\",\n search_scope=BASE,\n attributes=[\"pwdProperties\"],\n )\n root_attrs = list(root_attrs)[0]\n except (LDAPAttributeError, KeyError, IndexError):\n return False\n raw_pwd_properties = root_attrs.get(\"attributes\", {}).get(\"pwdProperties\", None)\n if not raw_pwd_properties:\n return False\n\n try:\n pwd_properties = PwdProperties(raw_pwd_properties)\n except ValueError:\n return False\n if PwdProperties.DOMAIN_PASSWORD_COMPLEX in pwd_properties:\n return True\n\n return False\n\n def change_password(self, user: User, password: str):\n \"\"\"Change user's password\"\"\"\n user_dn = user.attributes.get(LDAP_DISTINGUISHED_NAME, None)\n if not user_dn:\n LOGGER.info(f\"User has no {LDAP_DISTINGUISHED_NAME} set.\")\n return\n try:\n self._connection.extend.microsoft.modify_password(user_dn, password)\n except LDAPAttributeError:\n self._connection.extend.standard.modify_password(user_dn, new_password=password)\n\n def _ad_check_password_existing(self, password: str, user_dn: str) -> bool:\n \"\"\"Check if a password contains sAMAccount or displayName\"\"\"\n users = list(\n self._connection.extend.standard.paged_search(\n search_base=user_dn,\n search_filter=self._source.user_object_filter,\n search_scope=BASE,\n attributes=[\"displayName\", \"sAMAccountName\"],\n )\n )\n if len(users) != 1:\n raise AssertionError()\n user_attributes = users[0][\"attributes\"]\n # If sAMAccountName is longer than 3 chars, check if its contained in password\n if len(user_attributes[\"sAMAccountName\"]) >= 3:\n if password.lower() in user_attributes[\"sAMAccountName\"].lower():\n return False\n # No display name set, can't check any further\n if len(user_attributes[\"displayName\"]) < 1:\n return True\n for display_name in user_attributes[\"displayName\"]:\n display_name_tokens = split(RE_DISPLAYNAME_SEPARATORS, display_name)\n for token in display_name_tokens:\n # Ignore tokens under 3 chars\n if len(token) < 3:\n continue\n if token.lower() in password.lower():\n return False\n return True\n\n def ad_password_complexity(self, password: str, user: Optional[User] = None) -> bool:\n \"\"\"Check if password matches Active directory password policies\n\n https://docs.microsoft.com/en-us/windows/security/threat-protection/\n security-policy-settings/password-must-meet-complexity-requirements\n \"\"\"\n if user:\n # Check if password contains sAMAccountName or displayNames\n if LDAP_DISTINGUISHED_NAME in user.attributes:\n existing_user_check = self._ad_check_password_existing(\n password, user.attributes.get(LDAP_DISTINGUISHED_NAME)\n )\n if not existing_user_check:\n LOGGER.debug(\"Password 
failed name check\", user=user)\n return existing_user_check\n\n # Step 2, match at least 3 of 5 categories\n matched_categories = PasswordCategories.NONE\n required = 3\n for letter in password:\n # Only match one category per letter,\n if letter.islower():\n matched_categories |= PasswordCategories.ALPHA_LOWER\n elif letter.isupper():\n matched_categories |= PasswordCategories.ALPHA_UPPER\n elif not letter.isascii() and letter.isalpha():\n # Not exactly matching microsoft's policy, but count it as \"Other unicode\" char\n # when its alpha and not ascii\n matched_categories |= PasswordCategories.ALPHA_OTHER\n elif letter.isnumeric():\n matched_categories |= PasswordCategories.NUMERIC\n elif letter in NON_ALPHA:\n matched_categories |= PasswordCategories.SYMBOL\n if bin(matched_categories).count(\"1\") < required:\n LOGGER.debug(\n \"Password didn't match enough categories\",\n has=matched_categories,\n must=required,\n )\n return False\n LOGGER.debug(\"Password matched categories\", has=matched_categories, must=required)\n return True\n", "path": "authentik/sources/ldap/password.py"}]}
2,678
295
gh_patches_debug_30967
rasdani/github-patches
git_diff
RedHatInsights__insights-core-1643
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exceptions on the command line with invalid arguments in the archive position When trying to run `python -m insights foo` or `python -m insights existent_but_invalid.py`, an exception is raised all the way to the top, and if the file exists, a partial execution against the current machine happens. We should catch the exception, never partially execute, and provide a simple message for a better CLI experience. </issue> <code> [start of insights/__init__.py] 1 """ 2 Insights Core is a data collection and analysis framework that is built for 3 extensibility and rapid development. It includes a set of reusable components 4 for gathering data in myriad ways and providing a reliable object model for it. 5 6 .. code-block: python 7 8 >>> from insights import run 9 >>> from insights.parsers import installed_rpms as rpm 10 >>> lower = rpm.Rpm("bash-4.4.11-1.fc26") 11 >>> upper = rpm.Rpm("bash-4.4.22-1.fc26") 12 >>> results = run(rpm.Installed) 13 >>> rpms = results[rpm.Installed] 14 >>> rpms.newest("bash") 15 "0:bash-4.4.12-7.fc26" 16 >>> lower <= rpms.newest("bash") < upper 17 True 18 """ 19 from __future__ import print_function 20 import logging 21 import pkgutil 22 import os 23 import sys 24 import yaml 25 26 from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401 27 from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 28 from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 29 from .core import AttributeDict # noqa: F401 30 from .core import Syslog # noqa: F401 31 from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 32 from .core import dr # noqa: F401 33 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401 34 from .core.dr import SkipComponent # noqa: F401 35 from .core.hydration import create_context 36 from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 37 from .core.plugins import datasource, condition, incident # noqa: F401 38 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 39 from .core.plugins import make_pass, make_fail # noqa: F401 40 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 41 from .core.serde import Hydration 42 from .formats import get_formatter 43 from .parsers import get_active_lines # noqa: F401 44 from .util import defaults # noqa: F401 45 46 log = logging.getLogger(__name__) 47 48 49 package_info = dict((k, None) for k in ["RELEASE", "COMMIT", "VERSION", "NAME"]) 50 51 52 for name in package_info: 53 package_info[name] = pkgutil.get_data(__name__, name).strip().decode("utf-8") 54 55 56 def get_nvr(): 57 return "{0}-{1}-{2}".format(package_info["NAME"], 58 package_info["VERSION"], 59 package_info["RELEASE"]) 60 61 62 RULES_STATUS = {} 63 """ 64 Mapping of dictionaries containing nvr and commitid for each rule repo included 65 in this instance 66 67 {"rule_repo_1": {"version": nvr(), "commit": sha1}} 68 """ 69 70 71 def add_status(name, nvr, commit): 72 """ 73 Rule repositories should call this method in their package __init__ to 74 register their version information. 
75 """ 76 RULES_STATUS[name] = {"version": nvr, "commit": commit} 77 78 79 def process_dir(broker, root, graph, context, inventory=None): 80 ctx = create_context(root, context) 81 log.debug("Processing %s with %s" % (root, ctx)) 82 83 if isinstance(ctx, ClusterArchiveContext): 84 from .core.cluster import process_cluster 85 archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)] 86 return process_cluster(archives, broker=broker, inventory=inventory) 87 88 broker[ctx.__class__] = ctx 89 if isinstance(ctx, SerializedArchiveContext): 90 h = Hydration(ctx.root) 91 broker = h.hydrate(broker=broker) 92 broker = dr.run(graph, broker=broker) 93 return broker 94 95 96 def _run(broker, graph=None, root=None, context=None, inventory=None): 97 """ 98 run is a general interface that is meant for stand alone scripts to use 99 when executing insights components. 100 101 Args: 102 root (str): None will causes a host collection in which command and 103 file specs are run. A directory or archive path will cause 104 collection from the directory or archive, and only file type specs 105 or those that depend on `insights.core.context.HostArchiveContext` 106 will execute. 107 component (function or class): The component to execute. Will only execute 108 the component and its dependency graph. If None, all components with 109 met dependencies will execute. 110 111 Returns: 112 broker: object containing the result of the evaluation. 113 """ 114 115 if not root: 116 context = context or HostContext 117 broker[context] = context() 118 return dr.run(graph, broker=broker) 119 120 if os.path.isdir(root): 121 return process_dir(broker, root, graph, context, inventory=inventory) 122 else: 123 with extract(root) as ex: 124 return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory) 125 126 127 def apply_configs(configs): 128 """ 129 Configures components. They can be enabled or disabled, have timeouts set 130 if applicable, and have metadata customized. Valid keys are name, enabled, 131 metadata, and timeout. 132 133 Args: 134 configs (list): a list of dictionaries with the following keys: 135 name, enabled, metadata, and timeout. All keys are optional except 136 name. 137 138 name is the prefix or exact name of any loaded component. Any 139 component starting with name will have the associated configuration 140 applied. 141 142 enabled is whether the matching components will execute even if 143 their dependencies are met. Defaults to True. 144 145 timeout sets the class level timeout attribute of any component so 146 long as the attribute already exists. 147 148 metadata is any dictionary that you want to attach to the 149 component. The dictionary can be retrieved by the component at 150 runtime. 151 """ 152 delegate_keys = sorted(dr.DELEGATES, key=dr.get_name) 153 for comp_cfg in configs: 154 name = comp_cfg["name"] 155 for c in delegate_keys: 156 delegate = dr.DELEGATES[c] 157 cname = dr.get_name(c) 158 if cname.startswith(name): 159 dr.ENABLED[c] = comp_cfg.get("enabled", True) 160 delegate.metadata.update(comp_cfg.get("metadata", {})) 161 delegate.tags = set(comp_cfg.get("tags", delegate.tags)) 162 for k, v in delegate.metadata.items(): 163 if hasattr(c, k): 164 setattr(c, k, v) 165 if hasattr(c, "timeout"): 166 c.timeout = comp_cfg.get("timeout", c.timeout) 167 if cname == name: 168 break 169 170 171 def _load_context(path): 172 if path is None: 173 return 174 175 if "." 
not in path: 176 path = ".".join(["insights.core.context", path]) 177 return dr.get_component(path) 178 179 180 def run(component=None, root=None, print_summary=False, 181 context=None, inventory=None, print_component=None): 182 183 from .core import dr 184 dr.load_components("insights.specs.default") 185 dr.load_components("insights.specs.insights_archive") 186 dr.load_components("insights.specs.sos_archive") 187 dr.load_components("insights.specs.jdr_archive") 188 189 args = None 190 formatter = None 191 if print_summary: 192 import argparse 193 import logging 194 p = argparse.ArgumentParser(add_help=False) 195 p.add_argument("archive", nargs="?", help="Archive or directory to analyze.") 196 p.add_argument("-p", "--plugins", default="", help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") 197 p.add_argument("-c", "--config", help="Configure components.") 198 p.add_argument("-i", "--inventory", help="Ansible inventory file for cluster analysis.") 199 p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") 200 p.add_argument("-f", "--format", help="Output format.", default="insights.formats.text") 201 p.add_argument("-D", "--debug", help="Verbose debug output.", action="store_true") 202 p.add_argument("--context", help="Execution Context. Defaults to HostContext if an archive isn't passed.") 203 204 class Args(object): 205 pass 206 207 args = Args() 208 p.parse_known_args(namespace=args) 209 p = argparse.ArgumentParser(parents=[p]) 210 args.format = "insights.formats._json" if args.format == "json" else args.format 211 args.format = "insights.formats._yaml" if args.format == "yaml" else args.format 212 fmt = args.format if "." in args.format else "insights.formats." + args.format 213 Formatter = dr.get_component(fmt) 214 if not Formatter: 215 dr.load_components(fmt, continue_on_error=False) 216 Formatter = get_formatter(fmt) 217 Formatter.configure(p) 218 p.parse_args(namespace=args) 219 formatter = Formatter(args) 220 221 logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR) 222 context = _load_context(args.context) or context 223 inventory = args.inventory 224 225 root = args.archive or root 226 if root: 227 root = os.path.realpath(root) 228 229 plugins = [] 230 if args.plugins: 231 for path in args.plugins.split(","): 232 path = path.strip() 233 if path.endswith(".py"): 234 path, _ = os.path.splitext(path) 235 path = path.rstrip("/").replace("/", ".") 236 plugins.append(path) 237 238 for p in plugins: 239 dr.load_components(p, continue_on_error=False) 240 241 if args.config: 242 with open(args.config) as f: 243 apply_configs(yaml.safe_load(f)) 244 245 if component is None: 246 component = [] 247 plugins = tuple(plugins) 248 for c in dr.DELEGATES: 249 if c.__module__.startswith(plugins): 250 component.append(c) 251 252 if component: 253 if not isinstance(component, (list, set)): 254 component = [component] 255 graph = {} 256 for c in component: 257 graph.update(dr.get_dependency_graph(c)) 258 else: 259 graph = dr.COMPONENTS[dr.GROUPS.single] 260 261 broker = dr.Broker() 262 263 if formatter: 264 formatter.preprocess(broker) 265 broker = _run(broker, graph, root, context=context, inventory=inventory) 266 formatter.postprocess(broker) 267 elif print_component: 268 broker = _run(broker, graph, root, context=context, inventory=inventory) 269 broker.print_component(print_component) 270 else: 271 broker = _run(broker, graph, root, context=context, inventory=inventory) 272 273 
return broker 274 275 276 def main(): 277 if "" not in sys.path: 278 sys.path.insert(0, "") 279 run(print_summary=True) 280 281 282 if __name__ == "__main__": 283 main() 284 [end of insights/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/insights/__init__.py b/insights/__init__.py --- a/insights/__init__.py +++ b/insights/__init__.py @@ -28,7 +28,7 @@ from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 from .core import AttributeDict # noqa: F401 from .core import Syslog # noqa: F401 -from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 +from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401 from .core import dr # noqa: F401 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401 from .core.dr import SkipComponent # noqa: F401 @@ -260,17 +260,25 @@ broker = dr.Broker() - if formatter: - formatter.preprocess(broker) - broker = _run(broker, graph, root, context=context, inventory=inventory) - formatter.postprocess(broker) - elif print_component: - broker = _run(broker, graph, root, context=context, inventory=inventory) - broker.print_component(print_component) - else: - broker = _run(broker, graph, root, context=context, inventory=inventory) - - return broker + try: + if formatter: + formatter.preprocess(broker) + broker = _run(broker, graph, root, context=context, inventory=inventory) + formatter.postprocess(broker) + elif print_component: + broker = _run(broker, graph, root, context=context, inventory=inventory) + broker.print_component(print_component) + else: + broker = _run(broker, graph, root, context=context, inventory=inventory) + + return broker + except (InvalidContentType, InvalidArchive): + if args and args.archive: + path = args.archive + msg = "Invalid directory or archive. Did you mean to pass -p {p}?" + log.error(msg.format(p=path)) + else: + raise def main():
{"golden_diff": "diff --git a/insights/__init__.py b/insights/__init__.py\n--- a/insights/__init__.py\n+++ b/insights/__init__.py\n@@ -28,7 +28,7 @@\n from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\n from .core import AttributeDict # noqa: F401\n from .core import Syslog # noqa: F401\n-from .core.archives import COMPRESSION_TYPES, extract # noqa: F401\n+from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401\n from .core import dr # noqa: F401\n from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401\n from .core.dr import SkipComponent # noqa: F401\n@@ -260,17 +260,25 @@\n \n broker = dr.Broker()\n \n- if formatter:\n- formatter.preprocess(broker)\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n- formatter.postprocess(broker)\n- elif print_component:\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n- broker.print_component(print_component)\n- else:\n- broker = _run(broker, graph, root, context=context, inventory=inventory)\n-\n- return broker\n+ try:\n+ if formatter:\n+ formatter.preprocess(broker)\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+ formatter.postprocess(broker)\n+ elif print_component:\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+ broker.print_component(print_component)\n+ else:\n+ broker = _run(broker, graph, root, context=context, inventory=inventory)\n+\n+ return broker\n+ except (InvalidContentType, InvalidArchive):\n+ if args and args.archive:\n+ path = args.archive\n+ msg = \"Invalid directory or archive. Did you mean to pass -p {p}?\"\n+ log.error(msg.format(p=path))\n+ else:\n+ raise\n \n \n def main():\n", "issue": "Exceptions on the command line with invalid arguments in the archive position\nWhen trying to run `python -m insights foo` or `python -m insights existent_but_invalid.py`, an exception is raised all the way to the top, and if the file exists, a partial execution against the current machine happens. We should catch the exception, never partially execute, and provide a simple message for a better CLI experience.\n", "before_files": [{"content": "\"\"\"\nInsights Core is a data collection and analysis framework that is built for\nextensibility and rapid development. It includes a set of reusable components\nfor gathering data in myriad ways and providing a reliable object model for it.\n\n.. 
code-block: python\n\n >>> from insights import run\n >>> from insights.parsers import installed_rpms as rpm\n >>> lower = rpm.Rpm(\"bash-4.4.11-1.fc26\")\n >>> upper = rpm.Rpm(\"bash-4.4.22-1.fc26\")\n >>> results = run(rpm.Installed)\n >>> rpms = results[rpm.Installed]\n >>> rpms.newest(\"bash\")\n \"0:bash-4.4.12-7.fc26\"\n >>> lower <= rpms.newest(\"bash\") < upper\n True\n\"\"\"\nfrom __future__ import print_function\nimport logging\nimport pkgutil\nimport os\nimport sys\nimport yaml\n\nfrom .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401\nfrom .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401\nfrom .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\nfrom .core import AttributeDict # noqa: F401\nfrom .core import Syslog # noqa: F401\nfrom .core.archives import COMPRESSION_TYPES, extract # noqa: F401\nfrom .core import dr # noqa: F401\nfrom .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext # noqa: F401\nfrom .core.dr import SkipComponent # noqa: F401\nfrom .core.hydration import create_context\nfrom .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401\nfrom .core.plugins import datasource, condition, incident # noqa: F401\nfrom .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401\nfrom .core.plugins import make_pass, make_fail # noqa: F401\nfrom .core.filters import add_filter, apply_filters, get_filters # noqa: F401\nfrom .core.serde import Hydration\nfrom .formats import get_formatter\nfrom .parsers import get_active_lines # noqa: F401\nfrom .util import defaults # noqa: F401\n\nlog = logging.getLogger(__name__)\n\n\npackage_info = dict((k, None) for k in [\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\n\nfor name in package_info:\n package_info[name] = pkgutil.get_data(__name__, name).strip().decode(\"utf-8\")\n\n\ndef get_nvr():\n return \"{0}-{1}-{2}\".format(package_info[\"NAME\"],\n package_info[\"VERSION\"],\n package_info[\"RELEASE\"])\n\n\nRULES_STATUS = {}\n\"\"\"\nMapping of dictionaries containing nvr and commitid for each rule repo included\nin this instance\n\n{\"rule_repo_1\": {\"version\": nvr(), \"commit\": sha1}}\n\"\"\"\n\n\ndef add_status(name, nvr, commit):\n \"\"\"\n Rule repositories should call this method in their package __init__ to\n register their version information.\n \"\"\"\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}\n\n\ndef process_dir(broker, root, graph, context, inventory=None):\n ctx = create_context(root, context)\n log.debug(\"Processing %s with %s\" % (root, ctx))\n\n if isinstance(ctx, ClusterArchiveContext):\n from .core.cluster import process_cluster\n archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)]\n return process_cluster(archives, broker=broker, inventory=inventory)\n\n broker[ctx.__class__] = ctx\n if isinstance(ctx, SerializedArchiveContext):\n h = Hydration(ctx.root)\n broker = h.hydrate(broker=broker)\n broker = dr.run(graph, broker=broker)\n return broker\n\n\ndef _run(broker, graph=None, root=None, context=None, inventory=None):\n \"\"\"\n run is a general interface that is meant for stand alone scripts to use\n when executing insights components.\n\n Args:\n root (str): None will causes a host collection in which command and\n file specs are run. 
A directory or archive path will cause\n collection from the directory or archive, and only file type specs\n or those that depend on `insights.core.context.HostArchiveContext`\n will execute.\n component (function or class): The component to execute. Will only execute\n the component and its dependency graph. If None, all components with\n met dependencies will execute.\n\n Returns:\n broker: object containing the result of the evaluation.\n \"\"\"\n\n if not root:\n context = context or HostContext\n broker[context] = context()\n return dr.run(graph, broker=broker)\n\n if os.path.isdir(root):\n return process_dir(broker, root, graph, context, inventory=inventory)\n else:\n with extract(root) as ex:\n return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)\n\n\ndef apply_configs(configs):\n \"\"\"\n Configures components. They can be enabled or disabled, have timeouts set\n if applicable, and have metadata customized. Valid keys are name, enabled,\n metadata, and timeout.\n\n Args:\n configs (list): a list of dictionaries with the following keys:\n name, enabled, metadata, and timeout. All keys are optional except\n name.\n\n name is the prefix or exact name of any loaded component. Any\n component starting with name will have the associated configuration\n applied.\n\n enabled is whether the matching components will execute even if\n their dependencies are met. Defaults to True.\n\n timeout sets the class level timeout attribute of any component so\n long as the attribute already exists.\n\n metadata is any dictionary that you want to attach to the\n component. The dictionary can be retrieved by the component at\n runtime.\n \"\"\"\n delegate_keys = sorted(dr.DELEGATES, key=dr.get_name)\n for comp_cfg in configs:\n name = comp_cfg[\"name\"]\n for c in delegate_keys:\n delegate = dr.DELEGATES[c]\n cname = dr.get_name(c)\n if cname.startswith(name):\n dr.ENABLED[c] = comp_cfg.get(\"enabled\", True)\n delegate.metadata.update(comp_cfg.get(\"metadata\", {}))\n delegate.tags = set(comp_cfg.get(\"tags\", delegate.tags))\n for k, v in delegate.metadata.items():\n if hasattr(c, k):\n setattr(c, k, v)\n if hasattr(c, \"timeout\"):\n c.timeout = comp_cfg.get(\"timeout\", c.timeout)\n if cname == name:\n break\n\n\ndef _load_context(path):\n if path is None:\n return\n\n if \".\" not in path:\n path = \".\".join([\"insights.core.context\", path])\n return dr.get_component(path)\n\n\ndef run(component=None, root=None, print_summary=False,\n context=None, inventory=None, print_component=None):\n\n from .core import dr\n dr.load_components(\"insights.specs.default\")\n dr.load_components(\"insights.specs.insights_archive\")\n dr.load_components(\"insights.specs.sos_archive\")\n dr.load_components(\"insights.specs.jdr_archive\")\n\n args = None\n formatter = None\n if print_summary:\n import argparse\n import logging\n p = argparse.ArgumentParser(add_help=False)\n p.add_argument(\"archive\", nargs=\"?\", help=\"Archive or directory to analyze.\")\n p.add_argument(\"-p\", \"--plugins\", default=\"\", help=\"Comma-separated list without spaces of package(s) or module(s) containing plugins.\")\n p.add_argument(\"-c\", \"--config\", help=\"Configure components.\")\n p.add_argument(\"-i\", \"--inventory\", help=\"Ansible inventory file for cluster analysis.\")\n p.add_argument(\"-v\", \"--verbose\", help=\"Verbose output.\", action=\"store_true\")\n p.add_argument(\"-f\", \"--format\", help=\"Output format.\", default=\"insights.formats.text\")\n p.add_argument(\"-D\", \"--debug\", 
help=\"Verbose debug output.\", action=\"store_true\")\n p.add_argument(\"--context\", help=\"Execution Context. Defaults to HostContext if an archive isn't passed.\")\n\n class Args(object):\n pass\n\n args = Args()\n p.parse_known_args(namespace=args)\n p = argparse.ArgumentParser(parents=[p])\n args.format = \"insights.formats._json\" if args.format == \"json\" else args.format\n args.format = \"insights.formats._yaml\" if args.format == \"yaml\" else args.format\n fmt = args.format if \".\" in args.format else \"insights.formats.\" + args.format\n Formatter = dr.get_component(fmt)\n if not Formatter:\n dr.load_components(fmt, continue_on_error=False)\n Formatter = get_formatter(fmt)\n Formatter.configure(p)\n p.parse_args(namespace=args)\n formatter = Formatter(args)\n\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR)\n context = _load_context(args.context) or context\n inventory = args.inventory\n\n root = args.archive or root\n if root:\n root = os.path.realpath(root)\n\n plugins = []\n if args.plugins:\n for path in args.plugins.split(\",\"):\n path = path.strip()\n if path.endswith(\".py\"):\n path, _ = os.path.splitext(path)\n path = path.rstrip(\"/\").replace(\"/\", \".\")\n plugins.append(path)\n\n for p in plugins:\n dr.load_components(p, continue_on_error=False)\n\n if args.config:\n with open(args.config) as f:\n apply_configs(yaml.safe_load(f))\n\n if component is None:\n component = []\n plugins = tuple(plugins)\n for c in dr.DELEGATES:\n if c.__module__.startswith(plugins):\n component.append(c)\n\n if component:\n if not isinstance(component, (list, set)):\n component = [component]\n graph = {}\n for c in component:\n graph.update(dr.get_dependency_graph(c))\n else:\n graph = dr.COMPONENTS[dr.GROUPS.single]\n\n broker = dr.Broker()\n\n if formatter:\n formatter.preprocess(broker)\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n formatter.postprocess(broker)\n elif print_component:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n broker.print_component(print_component)\n else:\n broker = _run(broker, graph, root, context=context, inventory=inventory)\n\n return broker\n\n\ndef main():\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/__init__.py"}]}
3,753
517
gh_patches_debug_37383
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-3568
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python3.4 PyQt5 QML application requires environment variables With the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155 This is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller I'm using - Windows 7 32-bit - Qt 5.10.1 - PyQt5 compiled from source - Python 3.4.4 - pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip When I run the .exe I get an error ``` QWindowsEGLStaticContext::create: Failed to load and resolve libEGL function Failed to load opengl32sw.dll (The specified module could not be found.) Failed to load and resolve WGL/OpenGL functions Failed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip> This is most likely caused by not having the necessary graphics drivers installed. Install a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH. ``` To run the application I can copy these four .dlls into the `dist\main\` directory. - libEGL.dll - libGLESv2.dll - d3dcompiler_47.dll - opengl32sw.dll When I run it I get Command Prompt window with this output. ``` QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001 QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available. ``` Instead of copying those .dll files I can add the Qt bin directory to my PATH. ``` set PATH=%PATH%;C:\Qt\5.10.1\msvc2015\bin call main.exe QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001 QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available. ``` When I copy the `dist\main\` to another computer (Windows 10). I have to set two environment variables before the application will work. ``` set QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\PyQt5\Qt\plugins\platforms set QML2_IMPORT_PATH=%exeDir%\PyQt5\Qt\qml ``` There are no error messages on the Windows 10 computer with these two environment variables set. </issue> <code> [start of PyInstaller/hooks/hook-PyQt5.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2018, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 import os 10 11 from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files 12 13 hiddenimports = ['sip'] 14 15 # Collect the ``qt.conf`` file. 16 datas = [x for x in 17 collect_system_data_files(pyqt5_library_info.location['PrefixPath'], 18 'PyQt5') 19 if os.path.basename(x[0]) == 'qt.conf'] 20 21 # Include ICU files, if they exist. See the "Deployment approach" section in 22 # ``PyInstaller/utils/hooks/qt.py``. 
23 [(os.path.join(pyqt5_library_info.location['BinariesPath'], dll), 24 os.path.join('PyQt5', 'Qt', 'bin', dll)) 25 for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')] 26 27 # TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented. 28 ##binaries = [] 29 ##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'): 30 ## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll) 31 ## # Only add files if they exist. 32 ## if glob(dll_path): 33 ## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))] 34 [end of PyInstaller/hooks/hook-PyQt5.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py --- a/PyInstaller/hooks/hook-PyQt5.py +++ b/PyInstaller/hooks/hook-PyQt5.py @@ -6,6 +6,7 @@ # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- +import glob import os from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files @@ -18,16 +19,42 @@ 'PyQt5') if os.path.basename(x[0]) == 'qt.conf'] -# Include ICU files, if they exist. See the "Deployment approach" section in -# ``PyInstaller/utils/hooks/qt.py``. -[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll), - os.path.join('PyQt5', 'Qt', 'bin', dll)) - for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')] - -# TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented. -##binaries = [] -##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'): -## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll) -## # Only add files if they exist. -## if glob(dll_path): -## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))] + +def find_all_or_none(globs_to_include, num_files): + """ + globs_to_include is a list of file name globs + If the number of found files does not match num_files + then no files will be included. + """ + # TODO: This function is required because CI is failing to include libEGL + # The error in AppVeyor is: + # [2312] LOADER: Running pyi_lib_PyQt5-uic.py + # Failed to load libEGL (Access is denied.) + # More info: https://github.com/pyinstaller/pyinstaller/pull/3568 + # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and + # libGLESv2.dll will not be included for PyQt5 builds during CI. + to_include = [] + for dll in globs_to_include: + dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], + dll) + dll_file_paths = glob.glob(dll_path) + for dll_file_path in dll_file_paths: + file_name = os.path.basename(dll_file_path) + dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name) + to_include.append((dll_file_path, dst_dll_path)) + if len(to_include) == num_files: + return to_include + return [] + + +binaries = [] +angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll'] +binaries += find_all_or_none(angle_files, 3) + +opengl_software_renderer = ['opengl32sw.dll'] +binaries += find_all_or_none(opengl_software_renderer, 1) + +# Include ICU files, if they exist. +# See the "Deployment approach" section in ``PyInstaller/utils/hooks/qt.py``. +icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll'] +binaries += find_all_or_none(icu_files, 3)
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py\n--- a/PyInstaller/hooks/hook-PyQt5.py\n+++ b/PyInstaller/hooks/hook-PyQt5.py\n@@ -6,6 +6,7 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n+import glob\n import os\n \n from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n@@ -18,16 +19,42 @@\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n \n-# Include ICU files, if they exist. See the \"Deployment approach\" section in\n-# ``PyInstaller/utils/hooks/qt.py``.\n-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n- os.path.join('PyQt5', 'Qt', 'bin', dll))\n- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n-\n-# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n-##binaries = []\n-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n-## # Only add files if they exist.\n-## if glob(dll_path):\n-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n+\n+def find_all_or_none(globs_to_include, num_files):\n+ \"\"\"\n+ globs_to_include is a list of file name globs\n+ If the number of found files does not match num_files\n+ then no files will be included.\n+ \"\"\"\n+ # TODO: This function is required because CI is failing to include libEGL\n+ # The error in AppVeyor is:\n+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n+ # Failed to load libEGL (Access is denied.)\n+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n+ # libGLESv2.dll will not be included for PyQt5 builds during CI.\n+ to_include = []\n+ for dll in globs_to_include:\n+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ dll)\n+ dll_file_paths = glob.glob(dll_path)\n+ for dll_file_path in dll_file_paths:\n+ file_name = os.path.basename(dll_file_path)\n+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n+ to_include.append((dll_file_path, dst_dll_path))\n+ if len(to_include) == num_files:\n+ return to_include\n+ return []\n+\n+\n+binaries = []\n+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\n+binaries += find_all_or_none(angle_files, 3)\n+\n+opengl_software_renderer = ['opengl32sw.dll']\n+binaries += find_all_or_none(opengl_software_renderer, 1)\n+\n+# Include ICU files, if they exist.\n+# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\n+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\n+binaries += find_all_or_none(icu_files, 3)\n", "issue": "Python3.4 PyQt5 QML application requires environment variables\nWith the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. 
https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155\r\n\r\nThis is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller\r\n\r\nI'm using\r\n- Windows 7 32-bit\r\n- Qt 5.10.1\r\n- PyQt5 compiled from source\r\n- Python 3.4.4\r\n- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\nWhen I run the .exe I get an error\r\n\r\n```\r\nQWindowsEGLStaticContext::create: Failed to load and resolve libEGL function\r\nFailed to load opengl32sw.dll (The specified module could not be found.)\r\nFailed to load and resolve WGL/OpenGL functions\r\nFailed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>\r\nThis is most likely caused by not having the necessary graphics drivers installed.\r\n\r\nInstall a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH.\r\n```\r\n\r\nTo run the application I can copy these four .dlls into the `dist\\main\\` directory.\r\n\r\n- libEGL.dll\r\n- libGLESv2.dll\r\n- d3dcompiler_47.dll\r\n- opengl32sw.dll\r\n\r\nWhen I run it I get Command Prompt window with this output.\r\n```\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\nInstead of copying those .dll files I can add the Qt bin directory to my PATH.\r\n\r\n```\r\nset PATH=%PATH%;C:\\Qt\\5.10.1\\msvc2015\\bin\r\ncall main.exe\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\n\r\nWhen I copy the `dist\\main\\` to another computer (Windows 10).\r\n\r\nI have to set two environment variables before the application will work.\r\n\r\n```\r\nset QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\\PyQt5\\Qt\\plugins\\platforms\r\nset QML2_IMPORT_PATH=%exeDir%\\PyQt5\\Qt\\qml\r\n```\r\n\r\nThere are no error messages on the Windows 10 computer with these two environment variables set.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n# Include ICU files, if they exist. See the \"Deployment approach\" section in\n# ``PyInstaller/utils/hooks/qt.py``.\n[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n os.path.join('PyQt5', 'Qt', 'bin', dll))\n for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n\n# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". 
However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n##binaries = []\n##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n## # Only add files if they exist.\n## if glob(dll_path):\n## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}]}
1,689
918
gh_patches_debug_6456
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1427
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [opentelemetry-instrumentation-redis] Stop sending db.name argument or rename it to redis[0-15] Hi! I've been recently playing with OpenTelemetry for Python (Flask) application and noticed that for Redis **db.name** argument is send to OpenTelemetry collector which seems to be a number of database (integer). This seems to be incorrect as in Redis there is no db name concept (databases are numbered from 0 to 15). Technically, it shouldn't be any problem with that but it may break some OpenTelemetry backends which expects a real DB name not a number. I have done some additional debugging and found that for node.js and .NET **db.name** argument is not send to collector. Shouldn't we have some consistency here? **Describe your environment** $ python --version Python 3.8.13 $ $ pip list | grep 'opentelemetry\|redis' opentelemetry-api 1.13.0 opentelemetry-distro 0.34b0 opentelemetry-exporter-otlp 1.13.0 opentelemetry-exporter-otlp-proto-grpc 1.13.0 opentelemetry-exporter-otlp-proto-http 1.13.0 opentelemetry-instrumentation 0.34b0 opentelemetry-instrumentation-aws-lambda 0.34b0 opentelemetry-instrumentation-dbapi 0.34b0 opentelemetry-instrumentation-flask 0.34b0 opentelemetry-instrumentation-grpc 0.34b0 opentelemetry-instrumentation-jinja2 0.34b0 opentelemetry-instrumentation-logging 0.34b0 opentelemetry-instrumentation-redis 0.34b0 opentelemetry-instrumentation-requests 0.34b0 opentelemetry-instrumentation-sqlite3 0.34b0 opentelemetry-instrumentation-urllib 0.34b0 opentelemetry-instrumentation-urllib3 0.34b0 opentelemetry-instrumentation-wsgi 0.34b0 opentelemetry-propagator-aws-xray 1.0.1 opentelemetry-proto 1.13.0 opentelemetry-sdk 1.13.0 opentelemetry-semantic-conventions 0.34b0 opentelemetry-util-http 0.34b0 redis 4.3.4 **Steps to reproduce** Any Python app with connection to Redis will show this behavior. **What is the expected behavior?** Stop sending db.name argument or rename it to redis[0-15] **What is the actual behavior?** The db.name argument is send as a number of Redis database. **Additional context** Please see below some logs from OpenTelemetry collector for python and node.js to see a difference. 
===> PYTHON EXAMPLE ScopeSpans #0 ScopeSpans SchemaURL: InstrumentationScope opentelemetry.instrumentation.redis 0.34b0 Span #0 Trace ID : 4bc10b43ab0a0d3042f38ebbb32baef1 Parent ID : 79e2aed933827894 ID : 22f4fba607e73a33 Name : HMSET Kind : SPAN_KIND_CLIENT Start time : 2022-10-21 09:40:50.606962566 +0000 UTC End time : 2022-10-21 09:40:50.609568624 +0000 UTC Status code : STATUS_CODE_UNSET Status message : Attributes: -> db.statement: STRING(HMSET person1-hash name jane age 20) -> db.system: STRING(redis) -> db.name: INT(0) -> db.redis.database_index: INT(0) -> net.peer.name: STRING(redis-svc) -> net.peer.port: STRING(6379) -> net.transport: STRING(ip_tcp) -> db.redis.args_length: INT(6) ===> NODEJS EXAMPLE ScopeSpans #0 ScopeSpans SchemaURL: InstrumentationScope @opentelemetry/instrumentation-redis-4 0.33.0 Span #0 Trace ID : 21a071f4d1d7c860ecb758398d304f60 Parent ID : 1bbf5328c079ceda ID : 13dc47b2521f7f82 Name : redis-GET Kind : SPAN_KIND_CLIENT Start time : 2022-10-21 09:47:16.9553723 +0000 UTC End time : 2022-10-21 09:47:16.957585 +0000 UTC Status code : STATUS_CODE_UNSET Status message : Attributes: -> db.system: STRING(redis) -> net.peer.name: STRING(redis-svc) -> net.peer.port: INT(6379) -> db.statement: STRING(GET) ResourceSpans #4 Resource SchemaURL: Resource labels: -> service.name: STRING(nodejs-redis) -> telemetry.sdk.language: STRING(nodejs) -> telemetry.sdk.name: STRING(opentelemetry) -> telemetry.sdk.version: STRING(0.24.0) I am happy to contribute to it by reviewing the code fix and testing the behavior. @svrnm @sanketmehta28 </issue> <code> [start of instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 """ 16 Some utils used by the redis integration 17 """ 18 from opentelemetry.semconv.trace import ( 19 DbSystemValues, 20 NetTransportValues, 21 SpanAttributes, 22 ) 23 24 25 def _extract_conn_attributes(conn_kwargs): 26 """Transform redis conn info into dict""" 27 attributes = { 28 SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value, 29 } 30 db = conn_kwargs.get("db", 0) 31 attributes[SpanAttributes.DB_NAME] = db 32 attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db 33 try: 34 attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get( 35 "host", "localhost" 36 ) 37 attributes[SpanAttributes.NET_PEER_PORT] = conn_kwargs.get( 38 "port", 6379 39 ) 40 attributes[ 41 SpanAttributes.NET_TRANSPORT 42 ] = NetTransportValues.IP_TCP.value 43 except KeyError: 44 attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get("path", "") 45 attributes[ 46 SpanAttributes.NET_TRANSPORT 47 ] = NetTransportValues.UNIX.value 48 49 return attributes 50 51 52 def _format_command_args(args): 53 """Format command arguments and trim them as needed""" 54 value_max_len = 100 55 value_too_long_mark = "..." 
56 cmd_max_len = 1000 57 length = 0 58 out = [] 59 for arg in args: 60 cmd = str(arg) 61 62 if len(cmd) > value_max_len: 63 cmd = cmd[:value_max_len] + value_too_long_mark 64 65 if length + len(cmd) > cmd_max_len: 66 prefix = cmd[: cmd_max_len - length] 67 out.append(f"{prefix}{value_too_long_mark}") 68 break 69 70 out.append(cmd) 71 length += len(cmd) 72 73 return " ".join(out) 74 [end of instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py @@ -28,7 +28,6 @@ SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value, } db = conn_kwargs.get("db", 0) - attributes[SpanAttributes.DB_NAME] = db attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db try: attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n--- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n+++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py\n@@ -28,7 +28,6 @@\n SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value,\n }\n db = conn_kwargs.get(\"db\", 0)\n- attributes[SpanAttributes.DB_NAME] = db\n attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db\n try:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\n", "issue": "[opentelemetry-instrumentation-redis] Stop sending db.name argument or rename it to redis[0-15]\nHi! I've been recently playing with OpenTelemetry for Python (Flask) application and noticed that for Redis **db.name** argument is send to OpenTelemetry collector which seems to be a number of database (integer). This seems to be incorrect as in Redis there is no db name concept (databases are numbered from 0 to 15). Technically, it shouldn't be any problem with that but it may break some OpenTelemetry backends which expects a real DB name not a number. I have done some additional debugging and found that for node.js and .NET **db.name** argument is not send to collector. Shouldn't we have some consistency here?\r\n\r\n**Describe your environment** \r\n\r\n$ python --version\r\nPython 3.8.13\r\n$ \r\n$ pip list | grep 'opentelemetry\\|redis'\r\nopentelemetry-api 1.13.0\r\nopentelemetry-distro 0.34b0\r\nopentelemetry-exporter-otlp 1.13.0\r\nopentelemetry-exporter-otlp-proto-grpc 1.13.0\r\nopentelemetry-exporter-otlp-proto-http 1.13.0\r\nopentelemetry-instrumentation 0.34b0\r\nopentelemetry-instrumentation-aws-lambda 0.34b0\r\nopentelemetry-instrumentation-dbapi 0.34b0\r\nopentelemetry-instrumentation-flask 0.34b0\r\nopentelemetry-instrumentation-grpc 0.34b0\r\nopentelemetry-instrumentation-jinja2 0.34b0\r\nopentelemetry-instrumentation-logging 0.34b0\r\nopentelemetry-instrumentation-redis 0.34b0\r\nopentelemetry-instrumentation-requests 0.34b0\r\nopentelemetry-instrumentation-sqlite3 0.34b0\r\nopentelemetry-instrumentation-urllib 0.34b0\r\nopentelemetry-instrumentation-urllib3 0.34b0\r\nopentelemetry-instrumentation-wsgi 0.34b0\r\nopentelemetry-propagator-aws-xray 1.0.1\r\nopentelemetry-proto 1.13.0\r\nopentelemetry-sdk 1.13.0\r\nopentelemetry-semantic-conventions 0.34b0\r\nopentelemetry-util-http 0.34b0\r\nredis 4.3.4\r\n\r\n**Steps to reproduce**\r\nAny Python app with connection to Redis will show this behavior.\r\n\r\n**What is the expected behavior?**\r\nStop sending db.name argument or rename it to redis[0-15]\r\n\r\n**What is the actual behavior?**\r\nThe db.name argument is send as a number of Redis database.\r\n\r\n**Additional context**\r\n\r\nPlease see below some logs from OpenTelemetry collector for python and node.js to see a difference.\r\n\r\n===> PYTHON EXAMPLE\r\n\r\nScopeSpans #0\r\nScopeSpans SchemaURL: \r\nInstrumentationScope opentelemetry.instrumentation.redis 0.34b0\r\nSpan #0\r\n Trace ID : 4bc10b43ab0a0d3042f38ebbb32baef1\r\n Parent ID : 79e2aed933827894\r\n ID : 22f4fba607e73a33\r\n Name : HMSET\r\n Kind : SPAN_KIND_CLIENT\r\n Start time : 2022-10-21 09:40:50.606962566 +0000 UTC\r\n End time : 2022-10-21 09:40:50.609568624 +0000 UTC\r\n Status code : STATUS_CODE_UNSET\r\n Status message : \r\nAttributes:\r\n -> db.statement: STRING(HMSET 
person1-hash name jane age 20)\r\n -> db.system: STRING(redis)\r\n -> db.name: INT(0)\r\n -> db.redis.database_index: INT(0)\r\n -> net.peer.name: STRING(redis-svc)\r\n -> net.peer.port: STRING(6379)\r\n -> net.transport: STRING(ip_tcp)\r\n -> db.redis.args_length: INT(6)\r\n\r\n\r\n===> NODEJS EXAMPLE\r\n\r\nScopeSpans #0\r\nScopeSpans SchemaURL: \r\nInstrumentationScope @opentelemetry/instrumentation-redis-4 0.33.0\r\nSpan #0\r\n Trace ID : 21a071f4d1d7c860ecb758398d304f60\r\n Parent ID : 1bbf5328c079ceda\r\n ID : 13dc47b2521f7f82\r\n Name : redis-GET\r\n Kind : SPAN_KIND_CLIENT\r\n Start time : 2022-10-21 09:47:16.9553723 +0000 UTC\r\n End time : 2022-10-21 09:47:16.957585 +0000 UTC\r\n Status code : STATUS_CODE_UNSET\r\n Status message : \r\nAttributes:\r\n -> db.system: STRING(redis)\r\n -> net.peer.name: STRING(redis-svc)\r\n -> net.peer.port: INT(6379)\r\n -> db.statement: STRING(GET)\r\nResourceSpans #4\r\nResource SchemaURL: \r\nResource labels:\r\n -> service.name: STRING(nodejs-redis)\r\n -> telemetry.sdk.language: STRING(nodejs)\r\n -> telemetry.sdk.name: STRING(opentelemetry)\r\n -> telemetry.sdk.version: STRING(0.24.0)\r\n \r\nI am happy to contribute to it by reviewing the code fix and testing the behavior.\r\n\r\n@svrnm @sanketmehta28\r\n \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nSome utils used by the redis integration\n\"\"\"\nfrom opentelemetry.semconv.trace import (\n DbSystemValues,\n NetTransportValues,\n SpanAttributes,\n)\n\n\ndef _extract_conn_attributes(conn_kwargs):\n \"\"\"Transform redis conn info into dict\"\"\"\n attributes = {\n SpanAttributes.DB_SYSTEM: DbSystemValues.REDIS.value,\n }\n db = conn_kwargs.get(\"db\", 0)\n attributes[SpanAttributes.DB_NAME] = db\n attributes[SpanAttributes.DB_REDIS_DATABASE_INDEX] = db\n try:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\n \"host\", \"localhost\"\n )\n attributes[SpanAttributes.NET_PEER_PORT] = conn_kwargs.get(\n \"port\", 6379\n )\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n except KeyError:\n attributes[SpanAttributes.NET_PEER_NAME] = conn_kwargs.get(\"path\", \"\")\n attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.UNIX.value\n\n return attributes\n\n\ndef _format_command_args(args):\n \"\"\"Format command arguments and trim them as needed\"\"\"\n value_max_len = 100\n value_too_long_mark = \"...\"\n cmd_max_len = 1000\n length = 0\n out = []\n for arg in args:\n cmd = str(arg)\n\n if len(cmd) > value_max_len:\n cmd = cmd[:value_max_len] + value_too_long_mark\n\n if length + len(cmd) > cmd_max_len:\n prefix = cmd[: cmd_max_len - length]\n out.append(f\"{prefix}{value_too_long_mark}\")\n break\n\n out.append(cmd)\n length += len(cmd)\n\n return \" \".join(out)\n", "path": "instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py"}]}
2,595
182
gh_patches_debug_24813
rasdani/github-patches
git_diff
scrapy__scrapy-2577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BrowserLikeContextFactory not available in some conditions While tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success. This code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555. This file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future. That said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API? </issue> <code> [start of scrapy/core/downloader/contextfactory.py] 1 from OpenSSL import SSL 2 from twisted.internet.ssl import ClientContextFactory 3 4 try: 5 6 from zope.interface.declarations import implementer 7 8 # the following should be available from Twisted 14.0.0 9 from twisted.internet.ssl import (optionsForClientTLS, 10 CertificateOptions, 11 platformTrust) 12 13 from twisted.web.client import BrowserLikePolicyForHTTPS 14 from twisted.web.iweb import IPolicyForHTTPS 15 16 from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS 17 18 19 @implementer(IPolicyForHTTPS) 20 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS): 21 """ 22 Non-peer-certificate verifying HTTPS context factory 23 24 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD) 25 which allows TLS protocol negotiation 26 27 'A TLS/SSL connection established with [this method] may 28 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.' 29 """ 30 31 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs): 32 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs) 33 self._ssl_method = method 34 35 def getCertificateOptions(self): 36 # setting verify=True will require you to provide CAs 37 # to verify against; in other words: it's not that simple 38 39 # backward-compatible SSL/TLS method: 40 # 41 # * this will respect `method` attribute in often recommended 42 # `ScrapyClientContextFactory` subclass 43 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133) 44 # 45 # * getattr() for `_ssl_method` attribute for context factories 46 # not calling super(..., self).__init__ 47 return CertificateOptions(verify=False, 48 method=getattr(self, 'method', 49 getattr(self, '_ssl_method', None)), 50 fixBrokenPeers=True, 51 acceptableCiphers=DEFAULT_CIPHERS) 52 53 # kept for old-style HTTP/1.0 downloader context twisted calls, 54 # e.g. connectSSL() 55 def getContext(self, hostname=None, port=None): 56 return self.getCertificateOptions().getContext() 57 58 def creatorForNetloc(self, hostname, port): 59 return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext()) 60 61 62 @implementer(IPolicyForHTTPS) 63 class BrowserLikeContextFactory(ScrapyClientContextFactory): 64 """ 65 Twisted-recommended context factory for web clients. 66 67 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html: 68 "The default is to use a BrowserLikePolicyForHTTPS, 69 so unless you have special requirements you can leave this as-is." 
70 71 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS 72 except this context factory allows setting the TLS/SSL method to use. 73 74 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD) 75 which allows TLS protocol negotiation. 76 """ 77 def creatorForNetloc(self, hostname, port): 78 79 # trustRoot set to platformTrust() will use the platform's root CAs. 80 # 81 # This means that a website like https://www.cacert.org will be rejected 82 # by default, since CAcert.org CA certificate is seldom shipped. 83 return optionsForClientTLS(hostname.decode("ascii"), 84 trustRoot=platformTrust(), 85 extraCertificateOptions={ 86 'method': self._ssl_method, 87 }) 88 89 except ImportError: 90 91 class ScrapyClientContextFactory(ClientContextFactory): 92 "A SSL context factory which is more permissive against SSL bugs." 93 # see https://github.com/scrapy/scrapy/issues/82 94 # and https://github.com/scrapy/scrapy/issues/26 95 # and https://github.com/scrapy/scrapy/issues/981 96 97 def __init__(self, method=SSL.SSLv23_METHOD): 98 self.method = method 99 100 def getContext(self, hostname=None, port=None): 101 ctx = ClientContextFactory.getContext(self) 102 # Enable all workarounds to SSL bugs as documented by 103 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html 104 ctx.set_options(SSL.OP_ALL) 105 return ctx 106 [end of scrapy/core/downloader/contextfactory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py --- a/scrapy/core/downloader/contextfactory.py +++ b/scrapy/core/downloader/contextfactory.py @@ -1,15 +1,15 @@ from OpenSSL import SSL from twisted.internet.ssl import ClientContextFactory -try: +from scrapy import twisted_version + +if twisted_version >= (14, 0, 0): from zope.interface.declarations import implementer - # the following should be available from Twisted 14.0.0 from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, platformTrust) - from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.web.iweb import IPolicyForHTTPS @@ -86,7 +86,7 @@ 'method': self._ssl_method, }) -except ImportError: +else: class ScrapyClientContextFactory(ClientContextFactory): "A SSL context factory which is more permissive against SSL bugs."
{"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -1,15 +1,15 @@\n from OpenSSL import SSL\n from twisted.internet.ssl import ClientContextFactory\n \n-try:\n+from scrapy import twisted_version\n+\n+if twisted_version >= (14, 0, 0):\n \n from zope.interface.declarations import implementer\n \n- # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n-\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n@@ -86,7 +86,7 @@\n 'method': self._ssl_method,\n })\n \n-except ImportError:\n+else:\n \n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n", "issue": "BrowserLikeContextFactory not available in some conditions\nWhile tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success.\r\n\r\nThis code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555.\r\n\r\nThis file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.\r\nThat said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?\n", "before_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style 
HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]}
1,889
230
gh_patches_debug_500
rasdani/github-patches
git_diff
scikit-hep__uproot5-270
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Histogram protocol issue? I am confused about `PlottableAxisContinuous.__getitem__`: for uproot4 it seems to give the one-indexed values where I was expecting zero-indexed, since `__len__` gives the number of bins without flow. For example: ``` >>> h = uproot.open('https://raw.githubusercontent.com/CoffeaTeam/coffea/master/tests/samples/testSF2d.histo.root:scalefactors_Tight_Electron') >>> axis = h.axes[0] >>> len(axis) 10 >>> axis.edges() array([-2.5 , -2. , -1.566, -1.444, -0.8 , 0. , 0.8 , 1.444, 1.566, 2. , 2.5 ]) >>> axis.edges().shape (11,) >>> axis[0] (-2.0, -1.566) ``` </issue> <code> [start of uproot/behaviors/TAxis.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE 2 3 """ 4 This module defines the behaviors of ``TAxis``, an axis of a histogram or profile plot. 5 """ 6 7 from __future__ import absolute_import 8 9 try: 10 from collections.abc import Sequence 11 except ImportError: 12 from collections import Sequence 13 14 import numpy 15 16 17 class AxisTraits(object): 18 """ 19 Describes read-only properties of a histogram axis. 20 21 For example, ``axis.traits.discrete`` is True if the histogram has 22 labels; False otherwise. 23 """ 24 25 def __init__(self, axis): 26 self._axis = axis 27 28 def __repr__(self): 29 return "AxisTraits({0})".format(repr(self._axis)) 30 31 @property 32 def circular(self): 33 """ 34 True if the axis "wraps around" (always False for ROOT histograms). 35 """ 36 return False 37 38 @property 39 def discrete(self): 40 """ 41 True if bins are discrete: if they have string-valued labels. 42 """ 43 fNbins = self._axis.member("fNbins") 44 fLabels = self._axis.member("fLabels", none_if_missing=True) 45 return fLabels is not None and len(fLabels) == fNbins 46 47 48 class TAxis(Sequence): 49 def __len__(self): 50 """ 51 The number of bins in the axis. 52 """ 53 return self.member("fNbins") 54 55 def __getitem__(self, where): 56 """ 57 Returns the label at ``where`` if it exists or the interval at ``where``. 58 59 The indexing assumes that ``flow=False``. 60 """ 61 fNbins = self.member("fNbins") 62 fXbins = self.member("fXbins", none_if_missing=True) 63 fLabels = self.member("fLabels", none_if_missing=True) 64 65 if fLabels is not None and len(fLabels) == fNbins: 66 return str(fLabels[where]) 67 68 elif fXbins is None or len(fXbins) != fNbins + 1: 69 fXmin, fXmax = self.member("fXmin"), self.member("fXmax") 70 low = (fXmax - fXmin) * (where) / float(fNbins) + fXmin 71 high = (fXmax - fXmin) * (where + 1) / float(fNbins) + fXmin 72 return low, high 73 74 else: 75 return fXbins[where + 1], fXbins[where + 2] 76 77 def __iter__(self): 78 """ 79 Iterate over the output of ``__getitem__``. 80 """ 81 fNbins = self.member("fNbins") 82 fLabels = self.member("fLabels", none_if_missing=True) 83 84 if fLabels is not None and len(fLabels) == fNbins: 85 for x in fLabels: 86 yield str(x) 87 else: 88 for low, high in self.intervals(): 89 yield low, high 90 91 def __eq__(self, other): 92 """ 93 Two axes are equal if they have the same type and 94 ``list(self) == list(other)``. 
95 """ 96 if type(self) is not type(other): 97 return False 98 99 self_fNbins = self.member("fNbins") 100 other_fNbins = other.member("fNbins") 101 if self_fNbins != other_fNbins: 102 return False 103 104 self_fLabels = self.member("fLabels", none_if_missing=True) 105 other_fLabels = other.member("fLabels", none_if_missing=True) 106 self_labeled = self_fLabels is not None and len(self_fLabels) == self_fNbins 107 other_labeled = other_fLabels is not None and len(other_fLabels) == other_fNbins 108 109 if self_labeled and other_labeled: 110 return all(x == y for x, y in zip(self_fLabels, other_fLabels)) 111 elif not self_labeled and not other_labeled: 112 return numpy.array_equal(self.edges(), other.edges()) 113 else: 114 return False 115 116 def __ne__(self, other): 117 """ 118 Some versions of Python don't automatically negate __eq__. 119 """ 120 return not self.__eq__(other) 121 122 @property 123 def traits(self): 124 """ 125 Describes read-only properties of a histogram axis. 126 127 For example, ``axis.traits.discrete`` is True if the histogram has 128 labels; False otherwise. 129 """ 130 return AxisTraits(self) 131 132 @property 133 def low(self): 134 """ 135 The low edge of the first normal (finite-width) bin. 136 137 For ROOT histograms, numerical edges exist even if the axis also has 138 string-valued labels. 139 """ 140 return self.member("fXmin") 141 142 @property 143 def high(self): 144 """ 145 The high edge of the last normal (finite-width) bin. 146 147 For ROOT histograms, numerical edges exist even if the axis also has 148 string-valued labels. 149 """ 150 return self.member("fXmax") 151 152 @property 153 def width(self): 154 """ 155 The average bin width (or only bin width if the binning is uniform). 156 """ 157 fNbins = self.member("fNbins") 158 fXbins = self.member("fXbins", none_if_missing=True) 159 160 if fXbins is None or len(fXbins) != fNbins + 1: 161 return (self.member("fXmax") - self.member("fXmin")) / fNbins 162 else: 163 return self.widths().mean() 164 165 def labels(self, flow=False): 166 """ 167 Args: 168 flow (bool): If True, include ``"underflow"`` and ``"overflow"`` 169 before and after the normal (finite-width) bin labels (if they 170 exist). 171 172 If string-valued labels exist, this returns them as a Python list of 173 Python strings. Otherwise, this returns None. 174 175 Setting ``flow=True`` increases the length of the output by two. 176 """ 177 fNbins = self.member("fNbins") 178 fLabels = self.member("fLabels", none_if_missing=True) 179 180 if fLabels is not None and len(fLabels) == fNbins: 181 out = [str(x) for x in fLabels] 182 if flow: 183 return ["underflow"] + out + ["overflow"] 184 else: 185 return out 186 else: 187 return None 188 189 def edges(self, flow=False): 190 """ 191 Args: 192 flow (bool): If True, include ``-inf`` and ``inf`` before and 193 after the normal (finite-width) bin edges. 194 195 Returns numerical edges between bins as a one-dimensional ``numpy.ndarray`` 196 of ``numpy.float64``. 197 198 Even with ``flow=False``, the number of edges is *one greater than* the 199 number of normal (finite-width) bins because they represent "fenceposts" 200 between the bins, including one below and one above the full range. 201 202 Setting ``flow=True`` increases the length of the output by two. 203 204 For ROOT histograms, numerical edges exist even if the axis also has 205 string-valued labels. 
206 """ 207 fNbins = self.member("fNbins") 208 fXbins = self.member("fXbins", none_if_missing=True) 209 210 if fXbins is None or len(fXbins) != fNbins + 1: 211 fXbins = numpy.linspace( 212 self.member("fXmin"), self.member("fXmax"), fNbins + 1 213 ) 214 215 if flow: 216 out = numpy.empty(fNbins + 3, dtype=numpy.float64) 217 out[0] = -numpy.inf 218 out[-1] = numpy.inf 219 out[1:-1] = fXbins 220 else: 221 out = numpy.asarray(fXbins, dtype=fXbins.dtype.newbyteorder("=")) 222 223 return out 224 225 def intervals(self, flow=False): 226 """ 227 Args: 228 flow (bool): If True, include ``[-inf, min]`` and ``[max, inf]`` 229 before and after the normal (finite-width) intervals. 230 231 Returns low, high pairs for each bin interval as a two-dimensional 232 ``numpy.ndarray`` of ``numpy.float64``. 233 234 With ``flow=False``, the number of intervals is equal to the number of 235 normal (finite-width) bins. 236 237 Setting ``flow=True`` increases the length of the output by two. 238 239 For ROOT histograms, numerical intervals exist even if the axis also has 240 string-valued labels. 241 """ 242 fNbins = self.member("fNbins") 243 fXbins = self.member("fXbins", none_if_missing=True) 244 245 if fXbins is None or len(fXbins) != fNbins + 1: 246 fXbins = numpy.linspace( 247 self.member("fXmin"), self.member("fXmax"), fNbins + 1 248 ) 249 250 if flow: 251 out = numpy.empty((fNbins + 2, 2), dtype=numpy.float64) 252 out[0, 0] = -numpy.inf 253 out[-1, 1] = numpy.inf 254 out[1:, 0] = fXbins 255 out[:-1, 1] = fXbins 256 else: 257 out = numpy.empty((fNbins, 2), dtype=numpy.float64) 258 out[:, 0] = fXbins[:-1] 259 out[:, 1] = fXbins[1:] 260 261 return out 262 263 def centers(self, flow=False): 264 """ 265 Args: 266 flow (bool): If True, include ``-inf`` and ``inf`` before and after 267 the normal (finite) bin centers. 268 269 Returns bin center positions as a one-dimensional ``numpy.ndarray`` of 270 ``numpy.float64``. 271 272 With ``flow=False``, the number of bin centers is equal to the number of 273 normal (finite-width) bins. 274 275 Setting ``flow=True`` increases the length of the output by two. 276 277 For ROOT histograms, numerical bin centers exist even if the axis also has 278 string-valued labels. 279 """ 280 edges = self.edges(flow=flow) 281 return (edges[1:] + edges[:-1]) / 2.0 282 283 def widths(self, flow=False): 284 """ 285 Args: 286 flow (bool): If True, include ``-inf`` and ``inf`` before and after 287 the normal (finite) bin widths. 288 289 Returns bin widths as a one-dimensional ``numpy.ndarray`` of 290 ``numpy.float64``. 291 292 With ``flow=False``, the number of bin widths is equal to the number of 293 normal (finite-width) bins. 294 295 Setting ``flow=True`` increases the length of the output by two. 296 297 For ROOT histograms, numerical bin widths exist even if the axis also has 298 string-valued labels. 299 """ 300 fNbins = self.member("fNbins") 301 fXbins = self.member("fXbins", none_if_missing=True) 302 303 if not flow and (fXbins is None or len(fXbins) != fNbins + 1): 304 width = (self.member("fXmax") - self.member("fXmin")) / fNbins 305 return numpy.broadcast_to(width, (fNbins,)) 306 else: 307 edges = self.edges(flow=flow) 308 return edges[1:] - edges[:-1] 309 [end of uproot/behaviors/TAxis.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uproot/behaviors/TAxis.py b/uproot/behaviors/TAxis.py --- a/uproot/behaviors/TAxis.py +++ b/uproot/behaviors/TAxis.py @@ -72,7 +72,7 @@ return low, high else: - return fXbins[where + 1], fXbins[where + 2] + return fXbins[where], fXbins[where + 1] def __iter__(self): """
{"golden_diff": "diff --git a/uproot/behaviors/TAxis.py b/uproot/behaviors/TAxis.py\n--- a/uproot/behaviors/TAxis.py\n+++ b/uproot/behaviors/TAxis.py\n@@ -72,7 +72,7 @@\n return low, high\n \n else:\n- return fXbins[where + 1], fXbins[where + 2]\n+ return fXbins[where], fXbins[where + 1]\n \n def __iter__(self):\n \"\"\"\n", "issue": "Histogram protocol issue?\nI am confused about `PlottableAxisContinuous.__getitem__`: for uproot4 it seems to give the one-indexed values where I was expecting zero-indexed, since `__len__` gives the number of bins without flow. For example:\r\n```\r\n>>> h = uproot.open('https://raw.githubusercontent.com/CoffeaTeam/coffea/master/tests/samples/testSF2d.histo.root:scalefactors_Tight_Electron')\r\n>>> axis = h.axes[0]\r\n>>> len(axis)\r\n10\r\n>>> axis.edges()\r\narray([-2.5 , -2. , -1.566, -1.444, -0.8 , 0. , 0.8 , 1.444,\r\n 1.566, 2. , 2.5 ])\r\n>>> axis.edges().shape\r\n(11,)\r\n>>> axis[0]\r\n(-2.0, -1.566)\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines the behaviors of ``TAxis``, an axis of a histogram or profile plot.\n\"\"\"\n\nfrom __future__ import absolute_import\n\ntry:\n from collections.abc import Sequence\nexcept ImportError:\n from collections import Sequence\n\nimport numpy\n\n\nclass AxisTraits(object):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n\n def __init__(self, axis):\n self._axis = axis\n\n def __repr__(self):\n return \"AxisTraits({0})\".format(repr(self._axis))\n\n @property\n def circular(self):\n \"\"\"\n True if the axis \"wraps around\" (always False for ROOT histograms).\n \"\"\"\n return False\n\n @property\n def discrete(self):\n \"\"\"\n True if bins are discrete: if they have string-valued labels.\n \"\"\"\n fNbins = self._axis.member(\"fNbins\")\n fLabels = self._axis.member(\"fLabels\", none_if_missing=True)\n return fLabels is not None and len(fLabels) == fNbins\n\n\nclass TAxis(Sequence):\n def __len__(self):\n \"\"\"\n The number of bins in the axis.\n \"\"\"\n return self.member(\"fNbins\")\n\n def __getitem__(self, where):\n \"\"\"\n Returns the label at ``where`` if it exists or the interval at ``where``.\n\n The indexing assumes that ``flow=False``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n return str(fLabels[where])\n\n elif fXbins is None or len(fXbins) != fNbins + 1:\n fXmin, fXmax = self.member(\"fXmin\"), self.member(\"fXmax\")\n low = (fXmax - fXmin) * (where) / float(fNbins) + fXmin\n high = (fXmax - fXmin) * (where + 1) / float(fNbins) + fXmin\n return low, high\n\n else:\n return fXbins[where + 1], fXbins[where + 2]\n\n def __iter__(self):\n \"\"\"\n Iterate over the output of ``__getitem__``.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n for x in fLabels:\n yield str(x)\n else:\n for low, high in self.intervals():\n yield low, high\n\n def __eq__(self, other):\n \"\"\"\n Two axes are equal if they have the same type and\n ``list(self) == list(other)``.\n \"\"\"\n if type(self) is not type(other):\n return False\n\n self_fNbins = self.member(\"fNbins\")\n other_fNbins = 
other.member(\"fNbins\")\n if self_fNbins != other_fNbins:\n return False\n\n self_fLabels = self.member(\"fLabels\", none_if_missing=True)\n other_fLabels = other.member(\"fLabels\", none_if_missing=True)\n self_labeled = self_fLabels is not None and len(self_fLabels) == self_fNbins\n other_labeled = other_fLabels is not None and len(other_fLabels) == other_fNbins\n\n if self_labeled and other_labeled:\n return all(x == y for x, y in zip(self_fLabels, other_fLabels))\n elif not self_labeled and not other_labeled:\n return numpy.array_equal(self.edges(), other.edges())\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"\n Some versions of Python don't automatically negate __eq__.\n \"\"\"\n return not self.__eq__(other)\n\n @property\n def traits(self):\n \"\"\"\n Describes read-only properties of a histogram axis.\n\n For example, ``axis.traits.discrete`` is True if the histogram has\n labels; False otherwise.\n \"\"\"\n return AxisTraits(self)\n\n @property\n def low(self):\n \"\"\"\n The low edge of the first normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmin\")\n\n @property\n def high(self):\n \"\"\"\n The high edge of the last normal (finite-width) bin.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n return self.member(\"fXmax\")\n\n @property\n def width(self):\n \"\"\"\n The average bin width (or only bin width if the binning is uniform).\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n return (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n else:\n return self.widths().mean()\n\n def labels(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``\"underflow\"`` and ``\"overflow\"``\n before and after the normal (finite-width) bin labels (if they\n exist).\n\n If string-valued labels exist, this returns them as a Python list of\n Python strings. 
Otherwise, this returns None.\n\n Setting ``flow=True`` increases the length of the output by two.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fLabels = self.member(\"fLabels\", none_if_missing=True)\n\n if fLabels is not None and len(fLabels) == fNbins:\n out = [str(x) for x in fLabels]\n if flow:\n return [\"underflow\"] + out + [\"overflow\"]\n else:\n return out\n else:\n return None\n\n def edges(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and\n after the normal (finite-width) bin edges.\n\n Returns numerical edges between bins as a one-dimensional ``numpy.ndarray``\n of ``numpy.float64``.\n\n Even with ``flow=False``, the number of edges is *one greater than* the\n number of normal (finite-width) bins because they represent \"fenceposts\"\n between the bins, including one below and one above the full range.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical edges exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty(fNbins + 3, dtype=numpy.float64)\n out[0] = -numpy.inf\n out[-1] = numpy.inf\n out[1:-1] = fXbins\n else:\n out = numpy.asarray(fXbins, dtype=fXbins.dtype.newbyteorder(\"=\"))\n\n return out\n\n def intervals(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``[-inf, min]`` and ``[max, inf]``\n before and after the normal (finite-width) intervals.\n\n Returns low, high pairs for each bin interval as a two-dimensional\n ``numpy.ndarray`` of ``numpy.float64``.\n\n With ``flow=False``, the number of intervals is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical intervals exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if fXbins is None or len(fXbins) != fNbins + 1:\n fXbins = numpy.linspace(\n self.member(\"fXmin\"), self.member(\"fXmax\"), fNbins + 1\n )\n\n if flow:\n out = numpy.empty((fNbins + 2, 2), dtype=numpy.float64)\n out[0, 0] = -numpy.inf\n out[-1, 1] = numpy.inf\n out[1:, 0] = fXbins\n out[:-1, 1] = fXbins\n else:\n out = numpy.empty((fNbins, 2), dtype=numpy.float64)\n out[:, 0] = fXbins[:-1]\n out[:, 1] = fXbins[1:]\n\n return out\n\n def centers(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin centers.\n\n Returns bin center positions as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin centers is equal to the number of\n normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin centers exist even if the axis also has\n string-valued labels.\n \"\"\"\n edges = self.edges(flow=flow)\n return (edges[1:] + edges[:-1]) / 2.0\n\n def widths(self, flow=False):\n \"\"\"\n Args:\n flow (bool): If True, include ``-inf`` and ``inf`` before and after\n the normal (finite) bin widths.\n\n Returns bin widths as a one-dimensional ``numpy.ndarray`` of\n ``numpy.float64``.\n\n With ``flow=False``, the number of bin widths is equal to the number of\n 
normal (finite-width) bins.\n\n Setting ``flow=True`` increases the length of the output by two.\n\n For ROOT histograms, numerical bin widths exist even if the axis also has\n string-valued labels.\n \"\"\"\n fNbins = self.member(\"fNbins\")\n fXbins = self.member(\"fXbins\", none_if_missing=True)\n\n if not flow and (fXbins is None or len(fXbins) != fNbins + 1):\n width = (self.member(\"fXmax\") - self.member(\"fXmin\")) / fNbins\n return numpy.broadcast_to(width, (fNbins,))\n else:\n edges = self.edges(flow=flow)\n return edges[1:] - edges[:-1]\n", "path": "uproot/behaviors/TAxis.py"}]}
4,090
114
gh_patches_debug_63531
rasdani/github-patches
git_diff
MongoEngine__mongoengine-2224
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New release Hi, When is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc. Thanks </issue> <code> [start of mongoengine/__init__.py] 1 # Import submodules so that we can expose their __all__ 2 from mongoengine import connection 3 from mongoengine import document 4 from mongoengine import errors 5 from mongoengine import fields 6 from mongoengine import queryset 7 from mongoengine import signals 8 9 # Import everything from each submodule so that it can be accessed via 10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`, 11 # users can simply use `from mongoengine import connect`, or even 12 # `from mongoengine import *` and then `connect('testdb')`. 13 from mongoengine.connection import * 14 from mongoengine.document import * 15 from mongoengine.errors import * 16 from mongoengine.fields import * 17 from mongoengine.queryset import * 18 from mongoengine.signals import * 19 20 21 __all__ = ( 22 list(document.__all__) 23 + list(fields.__all__) 24 + list(connection.__all__) 25 + list(queryset.__all__) 26 + list(signals.__all__) 27 + list(errors.__all__) 28 ) 29 30 31 VERSION = (0, 18, 2) 32 33 34 def get_version(): 35 """Return the VERSION as a string. 36 37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'. 38 """ 39 return ".".join(map(str, VERSION)) 40 41 42 __version__ = get_version() 43 [end of mongoengine/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py --- a/mongoengine/__init__.py +++ b/mongoengine/__init__.py @@ -28,7 +28,7 @@ ) -VERSION = (0, 18, 2) +VERSION = (0, 19, 0) def get_version():
{"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 18, 2)\n+VERSION = (0, 19, 0)\n \n \n def get_version():\n", "issue": "New release\nHi,\r\n\r\nWhen is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.\r\n\r\nThanks\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 18, 2)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]}
977
85