Dataset columns: id (string, 40 chars), text (string, 29 to 2.03k chars), original_text (string, 3 to 154k chars), subdomain (string, 20 classes), metadata (dict)
df556343afd85ea31212f9c8a52e4d5a49a162cd
Q: How to force CKEditor to preserve tags I am using the latest version of CKEditor (4.7 to date) with the standard package, and I want to be able to force it to preserve line break elements (<br>). I have attempted to use the following config, without success: CKEDITOR.replace('ck', { allowedContent: true, enterMode: CKEDITOR.ENTER_BR }); As you can see in this jsfiddle, when you open Source mode, <br> tags have been replaced with a &nbsp;. How do you achieve that? A: A workaround (or at least partial workaround) was given on this CKEditor ticket, which forces the CKEditor to preserve <br> tags: editor.on( 'pluginsLoaded', function( evt ){ evt.editor.dataProcessor.dataFilter.addRules({ elements :{ br : function( element ) { //if next element is BR or <!--cke_br_comment-->, ignore it. if( element && element.next && ( element.next.name == 'br' || element.next.value == 'cke_br_comment' ) ){ return; }else { var comment = new CKEDITOR.htmlParser.comment( 'cke_br_comment' ); comment.insertAfter( element ); } } } }); evt.editor.dataProcessor.htmlFilter.addRules({ comment : function( value, node ) { if( value.indexOf('cke_br_comment') >= 0 ) { return false; } } }); Updated fiddle here. EDIT: you might also want to check my other answer which may work better depending on your needs. A: I think I have found a better answer which will work in more cases: introducing the "brangel" plugin: CKEDITOR.plugins.add('brangel', { init: function (editor) { editor.on('toHtml', function( evt ) { protectBRs(evt.data.dataValue); }, null, null, 5); editor.on('toHtml', function( evt ) { unprotectBRs(evt.data.dataValue); }, null, null, 14); editor.on('toDataFormat', function( evt ) { protectBRs(evt.data.dataValue); }, null, null, 5); editor.on('toDataFormat', function( evt ) { unprotectBRs(evt.data.dataValue); }, null, null, 14); function protectBRs(element) { var children = element.children; if (children) { for (var i = children.length; i--; ) { var child = children[i]; if (child.name == "br") { var placeholder = new CKEDITOR.htmlParser.text('{cke_br}'); placeholder.insertAfter(child); child.remove(); } else { protectBRs(child); } } } } function unprotectBRs(element) { var children = element.children; if (children) { for (var i = children.length; i--; ) { var child = children[i]; if (child instanceof CKEDITOR.htmlParser.text && child.value === "{cke_br}") { var br = new CKEDITOR.htmlParser.element('br'); br.insertAfter(child); child.remove(); } else { unprotectBRs(child); } } } } } }); The idea is to save the <br> elements from destruction by temporarily replacing them with some placeholder text ({cke_br}) before the filtering phase of the CKEditor occurs (see toDataFormat and toHtml events), and then restore them back at the end. This is all transparent to the user. Updated fiddle here. A: The developers of CKeditor have reportedly told that br to nbsp auto conversion is not an issue but CKeditors ways of normalizing things. It wont create any problem for you. So, you need not worry about your br tags being converted to nbsp Go through the following link for more. If you wish to remove the &nbsp from the source code, One way if to include the following : basicEntities: false, entities_additional: 'lt,gt,amp,apos,quot'
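As a small sketch (not taken from the answers above), the entity options mentioned in the last answer would sit in the same configuration object the question already uses; 'ck' is the element name from the question's own example:

// Hedged sketch: combine the question's config with the entity options named in the last answer
CKEDITOR.replace('ck', {
  enterMode: CKEDITOR.ENTER_BR,
  basicEntities: false,                        // stop converting basic characters into entities such as &nbsp;
  entities_additional: 'lt,gt,amp,apos,quot'   // but keep escaping the characters that must remain entities
});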
stackoverflow
{ "language": "en", "length": 476, "provenance": "stackexchange_0000F.jsonl.gz:851826", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501658" }
017aa899df9c597fd016e92d4c59feca16542dc4
Q: Local/UTC datetime string to Date There are datetime strings that were concatenated from date and time values: const localDatetime = `2017-01-01T12:00`; const utcDatetime = `2017-01-01T12:00`; and they are supposed to be converted to a Date object. In Firefox it is accepted as local time: new Date('2017-06-12T12:00').toISOString() === '2017-06-12T08:00:00.000Z' And in Chrome it is accepted as UTC time: new Date('2017-06-12T12:00').toISOString() === '2017-06-12T12:00:00.000Z' This looks inconsistent, to say the least. What is the explanation for that? Which of these browsers is right, and why? What is a cross-browser solution to perform this transformation properly for both local and UTC strings?
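The excerpt ends without an answer. As a rough sketch of one common cross-browser workaround (not taken from the original thread), the string can be parsed into its components so the Date is constructed explicitly rather than left to each browser's parser; the helper name below is hypothetical:

// Hypothetical helper: parse "YYYY-MM-DDTHH:mm" explicitly so the result does not
// depend on how the browser's built-in Date parser treats a timezone-less string.
function parseDatetime(str, asUtc) {
  var parts = str.split('T');
  var d = parts[0].split('-').map(Number); // [year, month, day]
  var t = parts[1].split(':').map(Number); // [hour, minute]
  return asUtc
    ? new Date(Date.UTC(d[0], d[1] - 1, d[2], t[0], t[1])) // treat the string as UTC
    : new Date(d[0], d[1] - 1, d[2], t[0], t[1]);          // treat the string as local time
}
// parseDatetime('2017-06-12T12:00', true).toISOString() === '2017-06-12T12:00:00.000Z' in every browser
// parseDatetime('2017-06-12T12:00', false) is interpreted in the machine's local time zone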
stackoverflow
{ "language": "en", "length": 95, "provenance": "stackexchange_0000F.jsonl.gz:851828", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501661" }
57a5b4981771dc58e8b1eb638dedc6fb0790e094
Q: ICollection does not contain a definition for 'Contains' I am trying to check whether a SearchResultEntry contains a certain attribute. For this, I am trying the following code: if(searchResultEntry.Attributes.AttributeNames.Contains(propertyName)) but this fails with the error ICollection does not contain a definition for Contains, and the best extension method overload "Queryable.Contains<string>(IQueryable<string>, string)" requires a receiver of type "IQueryable<string>". I also tried to make it an IQueryable using AttributeNames.AsQueryable(), but on that IQueryable, Contains is not available either. What is wrong here? When IntelliSense tells me something is available, but then, when I go to that very type, it is still unavailable, am I missing some references or using directives?
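The excerpt does not include an answer. A minimal sketch of one common workaround (not from the original thread): the error suggests AttributeNames is the non-generic ICollection, so LINQ's Contains<T> cannot bind to it directly, and Cast<string>() bridges that gap. The variable names come from the question; that the collection really holds strings is an assumption here.

// Requires: using System.Linq;
bool hasAttribute = searchResultEntry.Attributes.AttributeNames
    .Cast<string>()              // turn the non-generic ICollection into IEnumerable<string>
    .Contains(propertyName);     // now Enumerable.Contains<string> applies

// If Attributes is System.DirectoryServices.Protocols.SearchResultAttributeCollection,
// its own Contains(string) check may be simpler:
// bool hasAttribute = searchResultEntry.Attributes.Contains(propertyName);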
stackoverflow
{ "language": "en", "length": 110, "provenance": "stackexchange_0000F.jsonl.gz:851830", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501670" }
74dd8646fbfc69b129a245fdf7e0a0cdf70baed3
Q: Explanation of Typescript's intersection types i was playing around with intersection types and i would expect following as working? Could someone shed some light on it? type SomeError = { message: string; code?: number; }; type SomeResponse = { error: SomeError & { code: string; } }; const response: SomeResponse = { error: { message: 'neco', code: 'a' } }; // Type 'string' is not assignable to type 'number'. const response2: SomeResponse = { error: { message: 'neco', code: 50 } }; // Type 'number' is not assignable to type 'string'. A: The issue is that SomeResponse has this type for code: number & string And that's impossible to have. You can check that this is the case quite easily in playground with your code: let test = response.error.code; The type of test is number & string (just hover over the variable name) A: As others have pointed out, it seems you want union types (with the |). Check out the docs on advanced types and use the online REPL to test out theories. Here's code that uses interfaces and union types to get a flexible number / string code in your error type. interface SomeError { message: string; code: number | string; }; interface SomeResponse { error: SomeError } const response: SomeResponse = { error: { message: 'neco', code: 'a' } }; const response2: SomeResponse = { error: { message: 'neco', code: 50 } }; The docs lay out a use case for intersections, but it seems you just want specialization, which is where type guards come in, consider this function: const printErrCode = (code: string | number) => { if(typeof code === "string") { console.error(code); } else { console.error(`Err code: ${code}`); } } Edit: if you want to play with intersection, try replicating the extend function to create mixins, but do it with your error domain. Try to make error serialization / loggable / printable etc. And then mixin a plain error object (with just a string or something) with an object that can log (like the example of ConsoleLogger).
stackoverflow
{ "language": "en", "length": 343, "provenance": "stackexchange_0000F.jsonl.gz:851833", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501680" }
7a4fb8b719b61ba566213a74497531c61d61f383
Q: display the full column size in zeppelin %sql I simply want to display a column without truncation in a select, where I have an array or a Map with a very big length. I use Zeppelin to query a DataFrame registered as a temp table: %livy.sql select * from maTable I would like to have the full length of the Consommeur column, but I get something like: Do you have any idea? A: This issue is solved in version 0.8.0. The problem is that by default Zeppelin truncates the string values. In version 0.8.0 zeppelin.livy.spark.sql.field.truncate can be set to false (by default it is set to true). This can be done by going to the interpreter settings and adding that property. Extra information on the solution to the issue here: https://github.com/apache/zeppelin/pull/2201 Original Jira ticket for the issue: https://issues.apache.org/jira/browse/ZEPPELIN-1965
stackoverflow
{ "language": "en", "length": 135, "provenance": "stackexchange_0000F.jsonl.gz:851847", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501710" }
fd5c416f78e58b8aa5c96f6ea8fc11cce94897af
Q: How to merge contours in opencv? Ok guys I have been working on this project for quite some time now. I am building this bot that plays the chrome dinosaur game. So I tried other methods to detect the characters like matchTemplate and even made my own algorithm to locate the objects, but I like this one (findcontours) the most. Here's what I have: Can anyone help me find out how I should merge the two rectangles of the cacti? img = screen_cap() roi = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(roi,127, 255, 0) im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) first = True for cnt in contours: area = cv2.contourArea(cnt) if area > 200: #filtering contours x,y,w,h = cv2.boundingRect(cnt) if w/h < 4: # filtering even more cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) A: The easiest way to merge contours is to stack them contours = np.vstack(contours) in your case(for example to stack contours 5 and 6): contours = np.vstack([contours[5], contours[6]]) A: This is an old question and seems like it has not been answered properly yet (apologies to fellow SOers who did it partially in comments though). To me it seems like the questioner has a two-part question: * *Is there a opencv function that merges two rectangles? Answer to this question is yes and no. Let me be clear; yes if you are using opencv C++ bindings. Simple & can be used to take a union and | for intersection of two rects. But Python bindings lack those functions. *How to then do it in Python? def union(a,b): x = min(a[0], b[0]) y = min(a[1], b[1]) w = max(a[0]+a[2], b[0]+b[2]) - x h = max(a[1]+a[3], b[1]+b[3]) - y return (x, y, w, h) def intersection(a,b): x = max(a[0], b[0]) y = max(a[1], b[1]) w = min(a[0]+a[2], b[0]+b[2]) - x h = min(a[1]+a[3], b[1]+b[3]) - y if w<0 or h<0: return () # or (0,0,0,0) ? return (x, y, w, h) # Please remember a and b are rects. Source Code Credit: OpenCV union and intersaction on rects A: Sorry to be a little late to the party. However if I google "merge opencv contours" I find this; and I think there should be an answer. You can merge any two contours by one of those recipes: * *Get a list of points of each contour *append them *force them into cv2 contour format *get cv2.convexHull of that if you don't care too much about the details. If you don't like the result of convexHull because the concave parts of the contours are important, then follow this recipe instead: * *Get a list of points of each contour *append them *get a common center *sort all the points in clockwise order around the center *force them into cv2 contour format If the two contours have a lot of concave shapes in them, this could yield a zigzag-pattern as the recipe goes through both contours disregarding their original structure. If that's the case, you need to follow a third recipe: * *Get a list of points of each contour *get a common center *delete points of each contour which are inside the other contour *find the point which is closest to the common center in each contour. *go through the first contour as it is listed until you hit the closest point. *Then switch to the other list, starting from the closest point you go clockwise through the other contour until it is used up *switch back to the first contour and append the rest of their points. *force them into cv2 contour format The next-more-complicated case is if you have multiple intersections between the contours and you want to preserve the holes between the two. 
Then it's better to make a black image and draw the contours in white via cv2.fillPoly(); then get the contours back out via cv2.findContours() I sketched some steps for the first two recipes here get a list of points of each contour: import cv2 list_of_pts = [] for ctr in ctrs_to_merge list_of_pts += [pt[0] for pt in ctr] order points clockwise I use the function of this really great posting of MSeifert to order the points in clockwise order class clockwise_angle_and_distance(): ''' A class to tell if point is clockwise from origin or not. This helps if one wants to use sorted() on a list of points. Parameters ---------- point : ndarray or list, like [x, y]. The point "to where" we g0 self.origin : ndarray or list, like [x, y]. The center around which we go refvec : ndarray or list, like [x, y]. The direction of reference use: instantiate with an origin, then call the instance during sort reference: https://stackoverflow.com/questions/41855695/sorting-list-of-two-dimensional-coordinates-by-clockwise-angle-using-python Returns ------- angle distance ''' def __init__(self, origin): self.origin = origin def __call__(self, point, refvec = [0, 1]): if self.origin is None: raise NameError("clockwise sorting needs an origin. Please set origin.") # Vector between point and the origin: v = p - o vector = [point[0]-self.origin[0], point[1]-self.origin[1]] # Length of vector: ||v|| lenvector = np.linalg.norm(vector[0] - vector[1]) # If length is zero there is no angle if lenvector == 0: return -pi, 0 # Normalize vector: v/||v|| normalized = [vector[0]/lenvector, vector[1]/lenvector] dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2 diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2 angle = atan2(diffprod, dotprod) # Negative angles represent counter-clockwise angles so we need to # subtract them from 2*pi (360 degrees) if angle < 0: return 2*pi+angle, lenvector # I return first the angle because that's the primary sorting criterium # but if two vectors have the same angle then the shorter distance # should come first. return angle, lenvector center_pt = np.array(list_of_pts).mean(axis = 0) # get origin clock_ang_dist = clockwise_angle_and_distance(origin) # set origin list_of_pts = sorted(list_of_pts, key=clock_ang_dist) # use to sort force a list of points into cv2 format import numpy as np ctr = np.array(list_of_pts).reshape((-1,1,2)).astype(np.int32) merge them with cv2.convexHull instead If you use this then there is no need to order the points clockwise. However, the convexHull might lose some contour properties because it does not preserve concave corners of your contour. # get a list of points # force the list of points into cv2 format and then ctr = cv2.convexHull(ctr) # done. I think the function to merge two contours should be content of the opencv library. The recipe is quite straightforward and it is sad that many programmers using opencv will have to boilerplate code this.
stackoverflow
{ "language": "en", "length": 1064, "provenance": "stackexchange_0000F.jsonl.gz:851850", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501723" }
ebe8f24141bf3e13eb37c4ccd7d7061b8926be10
Q: Rx replay sequence of events at original pace (HistoricalScheduler?) I recorded some events from a machine in the field (positioning data). I would like to replay them as if they were coming from the physical system, to build simulations for the user. I tried to do it with Rx and HistoricalScheduler but without luck; is it possible to run it at the original speed? Thanks. Example: I would like scheduler.Start() to take 5 seconds to execute, with subscribers notified with the real delays: var source = Observable.Interval(TimeSpan.FromSeconds(1)); var log = source.Take(5).Timestamp().ToList().Wait(); Console.WriteLine("Time now is " + DateTime.Now); var scheduler = new HistoricalScheduler(); var replay = Observable.Generate( log.GetEnumerator(), events => events.MoveNext(), events => events, events => events.Current.Value, events => events.Current.Timestamp, scheduler); replay.Subscribe(i => Console.WriteLine("Event: {0} happened at {1}", i, scheduler.Now)); scheduler.Start();
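The excerpt ends without an answer. One possible approach (a sketch, not from the original thread, and not using HistoricalScheduler at all) is to compute the gap between consecutive recorded timestamps and replay each value behind a timer for that gap on the ordinary real-time scheduler:

// Sketch only; assumes the usual Rx.NET namespaces:
// using System; using System.Collections.Generic; using System.Linq;
// using System.Reactive; using System.Reactive.Linq;
static IObservable<long> ReplayAtOriginalPace(IList<Timestamped<long>> log)
{
    return log
        .Select((entry, i) => new
        {
            entry.Value,
            Gap = i == 0 ? TimeSpan.Zero : entry.Timestamp - log[i - 1].Timestamp
        })
        .Select(x => Observable.Timer(x.Gap).Select(_ => x.Value))
        .Concat(); // each value is emitted only after the previous one plus its recorded gap
}
// Usage: ReplayAtOriginalPace(log).Timestamp().Subscribe(x => Console.WriteLine(x));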
stackoverflow
{ "language": "en", "length": 131, "provenance": "stackexchange_0000F.jsonl.gz:851888", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501859" }
b79999297c1f2ae365d3c04a1c6440d5c4d411db
Q: How to use a custom attribute on an assembly in .NET Core 1.1 While I found this post regarding retrieving a custom attribute on an assembly, I am unsure how to add a custom attribute to an assembly in .NET Core 1.1. In .NET Framework, I would have done something like: [assembly: AdditionalLocation(@"..\ReadFromHere")] But my netcore1.1 projects in Visual Studio do not have an AssemblyInfo.cs. Where would I declare a custom attribute for an assembly? Is there something I can put in the .csproj file? A: You can always create a new AssemblyInfo.cs file or any other .cs file to do the same. However, you can also use the new auto-generated assembly info mechanism. You can add this to your csproj file, replacing the Include attribute's value with the type name of your custom attribute: <ItemGroup> <AssemblyAttribute Include="System.Runtime.CompilerServices.InternalsVisibleTo"> <_Parameter1>DasMulli.Win32.ServiceUtils.Tests</_Parameter1> </AssemblyAttribute> </ItemGroup> A: With .NET 5.0, you can use AssemblyMetadata: <AssemblyMetadata Include="Bar" Value="Baz" /> A: I just ran into this problem when I moved my library from the old .NET Framework to .NET Standard, and I managed to solve it by adding a non-string attribute that used to live in AssemblyInfo.cs. Here is a fragment of my .csproj file; when I ran the build, my attribute was there: <ItemGroup> <AssemblyAttribute Include="Xunit.CollectionBehavior"> <_Parameter1>Xunit.CollectionBehavior.CollectionPerAssembly</_Parameter1> <_Parameter1_IsLiteral>true</_Parameter1_IsLiteral> <_Parameter1_TypeName>Xunit.CollectionBehavior.CollectionPerAssembly</_Parameter1_TypeName> </AssemblyAttribute> </ItemGroup> It was added to MSBuild as described here: https://github.com/dotnet/msbuild/issues/2281, shipping with this release: https://github.com/dotnet/msbuild/blob/main/documentation/Changelog.md#msbuild-16100 Hope it helps!
stackoverflow
{ "language": "en", "length": 239, "provenance": "stackexchange_0000F.jsonl.gz:851921", "question_score": "20", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501954" }
dfa793c60743e983c1800d38504c7d56b4d4f9a4
Q: Spark mapPartitionsWithIndex : Identify a partition Identify a partition : mapPartitionsWithIndex(index, iter) The method applies a function to each partition. I understand that we can track the partition using the "index" parameter. Numerous examples have used this method to remove the header in a data set using an "index = 0" condition. But how do we make sure that the first partition which is read (that is, the one whose "index" parameter is equal to 0) is indeed the one holding the header? Isn't it random, or based upon the partitioner, if one is used? A: Isn't it random or based upon the partitioner, if used? It is not random; it is the partition number. You can see this with the simple example below: val base = sc.parallelize(1 to 100, 4) base.mapPartitionsWithIndex((index, iterator) => { iterator.map { x => (index, x) } }).foreach { x => println(x) } Result : (0,1) (1,26) (2,51) (1,27) (0,2) (0,3) (0,4) (1,28) (2,52) (1,29) (0,5) (1,30) (1,31) (2,53) (1,32) (0,6) ... ...
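Not part of the original answer: the header-removal idiom the question refers to typically looks like the sketch below, where rdd is assumed to be something like sc.textFile(...). It works because partition 0 holds the beginning of the input file, which is exactly why the index is deterministic rather than random.

// Drop the first record of partition 0 only; every other partition passes through untouched
val withoutHeader = rdd.mapPartitionsWithIndex { (index, iter) =>
  if (index == 0) iter.drop(1) else iter
}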
stackoverflow
{ "language": "en", "length": 159, "provenance": "stackexchange_0000F.jsonl.gz:851935", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44501995" }
9cd77a953c0384526ad9845fcdeb82e5b8af0b3a
Q: How to copy lines above the cursor? Here is a sample text file, with the cursor located on the 4th line. Typing 2yy in normal mode copies test4 test5, the two lines at and below the cursor. How can I copy the two lines above the cursor instead, i.e. test3 test4? Typing 1k and then 2yy gets it; is there another way? Both -2yy and 2YY don't work. A: The key combinations mentioned in the comments work. If you want to, you can also add the following mapping to your startup options and then use 2Y to copy the current line and the line above without moving the cursor. function! CopyLinesAbove(count) cal setreg('"',join(getline(line('.') - a:count + 1,line('.') + 1),"\n")."\n") endfunction command! -nargs=1 CopyLinesAboveCmd call CopyLinesAbove(<args>) map Y :<C-U>CopyLinesAboveCmd(v:count)<CR> A: Vky works because V starts linewise visual mode, k extends the selection one line up, and y yanks it. y1k works too, but the logic behind Vky seems more straightforward to me.
stackoverflow
{ "language": "en", "length": 151, "provenance": "stackexchange_0000F.jsonl.gz:851964", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502074" }
755d3c5dda4325ad3fb9a930840c1286e30dc4a4
Q: Excluding $tf folder in TFS 2015 - Copy and Publish Build Artifact I'm trying to exclude $tf folder as part of artifact publishing using "Copy and Publish Artifacts" option as shown in the picture. But it always includes $tf folder and publishes all the files under this folder. I tried this option, but no luck. https://social.msdn.microsoft.com/Forums/en-US/bb22c23e-cb44-44d2-8170-ba5609e9a688/need-help-excluding-the-tf-folder-and-sub-items?forum=TFService "Copy files" is not an option for publishing artifacts, so it would be better if there is any option to exclude the folder. A: Using Copy Files and Publish Build Artifacts tasks instead: * *Copy Files: Source Folder: $(build.sourcesdirectory); Target Folder: $(build.artifactstagingdirectory); Contents: **\* !$tf\** !**\$tf\** *Publish Build Artifacts: Path to Publish: $(build.artifactstagingdirectory)
stackoverflow
{ "language": "en", "length": 110, "provenance": "stackexchange_0000F.jsonl.gz:851972", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502093" }
c5ef178d1cd9e42df93b6f5837e3e3d02dc7924a
Q: Summing multiple columns in Spark How can I sum multiple columns in Spark? For example, in SparkR the following code works to get the sum of one column, but if I try to get the sum of both columns in df, I get an error. # Create SparkDataFrame df <- createDataFrame(faithful) # Use agg to sum total waiting times head(agg(df, totalWaiting = sum(df$waiting))) ##This works # Use agg to sum total of waiting and eruptions head(agg(df, total = sum(df$waiting, df$eruptions))) ##This doesn't work Either SparkR or PySpark code will work. A: You can do something like the below in PySpark: >>> from pyspark.sql import functions as F >>> df = spark.createDataFrame([("a",1,10), ("b",2,20), ("c",3,30), ("d",4,40)], ["col1", "col2", "col3"]) >>> df.groupBy("col1").agg(F.sum(df.col2+df.col3)).show() +----+------------------+ |col1|sum((col2 + col3))| +----+------------------+ | d| 44| | c| 33| | b| 22| | a| 11| +----+------------------+ A: For PySpark, if you don't want to explicitly type out the columns: from pyspark.sql import functions as F from operator import add from functools import reduce new_df = df.withColumn('total', reduce(add, [F.col(x) for x in numeric_col_list])) A: org.apache.spark.sql.functions.sum(Column e) Aggregate function: returns the sum of all values in the expression. As you can see, sum takes just one column as input, so sum(df$waiting, df$eruptions) won't work. Since you want to sum up the numeric fields, you can do sum(df("waiting") + df("eruptions")). If you want to sum up values for individual columns, you can do df.agg(sum(df$waiting), sum(df$eruptions)).show A: SparkR code: library(SparkR) df <- createDataFrame(sqlContext, faithful) w <- agg(df, sum(df$waiting), sum(df$eruptions)) head(w[[1]]) head(w[[2]]) A: You can use expr(): import pyspark.sql.functions as f numeric_cols = ['col_a','col_b','col_c'] df = df.withColumn('total', f.expr('+'.join(numeric_cols))) PySpark expr() is a SQL function to execute SQL-like expressions.
stackoverflow
{ "language": "en", "length": 259, "provenance": "stackexchange_0000F.jsonl.gz:851974", "question_score": "13", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502095" }
6c089cb0db1c1d0e2d64f5ebb2720acdd9cd71a3
Q: Should I prefer ThreadLocalRandom or SecureRandom? I know that the Random class generates insecure random sequences and I should prefer using SecureRandom when dealing with security. But what about ThreadLocalRandom? Is it more or less secure? // generate a five-digit numeric confirmation code Long code = ThreadLocalRandom.current().nextLong(1, 99999); A: ThreadLocalRandom is something like ThreadLocal<Random>, which creates a Random instance per thread. This has nothing to do with safety in a cryptographic context. So the question is really "what is the difference between the Random and SecureRandom implementations". SecureRandom differs in that it passes the tests required for safety in cryptography, specifically the tests specified by FIPS 140-2 (the standard for generators used in cryptography). For more details see the SecureRandom javadoc. A: As described in its javadoc, ThreadLocalRandom is similar to Random (i.e. not secure) but with better performance in case of concurrent access. Instances of ThreadLocalRandom are not cryptographically secure. Consider instead using SecureRandom in security-sensitive applications.
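As a small illustration (not part of either answer), the confirmation-code snippet from the question could be rewritten on top of SecureRandom, which is a drop-in java.util.Random subclass; whether a five-digit code is really what is wanted is an assumption carried over from the question's comment:

import java.security.SecureRandom;

SecureRandom rng = new SecureRandom();   // cryptographically strong generator
// nextInt(bound) is inherited from java.util.Random; the 10000 offset keeps the code at exactly five digits
long code = 10000 + rng.nextInt(90000);  // uniform in [10000, 99999]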
stackoverflow
{ "language": "en", "length": 154, "provenance": "stackexchange_0000F.jsonl.gz:852000", "question_score": "8", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502167" }
4eac640ffc1128cab41dfa8d333b586f5ca48904
Q: Angular ForEach in Angular4/Typescript? I see many answers about using ngFor when I search for this, but I understand ngFor. I'm asking about the angular.forEach() construct used in my Angular 1 controllers. These are flagged as errors in TS and do not compile. For example, I have one with a nested loop: _this.selectChildren = function (data, $event) { var parentChecked = data.checked; angular.forEach(_this.hierarchicalData, function (value, key) { angular.forEach(value.children, function (value, key) { value.checked = parentChecked; }); }); }; What does this construct look like in Typescript for Angular 4? A: In Angular 4 you can use the standard Array.prototype.forEach; try this: selectChildren(data, $event) { let parentChecked = data.checked; this.hierarchicalData.forEach(obj => { obj.children.forEach(childObj => { childObj.checked = parentChecked; }); }); } A: arrayData.forEach((key: any, val: any) => { key['index'] = val + 1; arrayData2.forEach((keys: any, vals: any) => { if (key.group_id == keys.id) { key.group_name = keys.group_name; } }) }) A: You can try TypeScript's for...of: selectChildren(data, $event) { let parentChecked: boolean = data.checked; for (let o of this.hierarchicalData) { for (let child of o.children) { child.checked = parentChecked; } } } A: In TypeScript you can write the nested loops like below: selectChildren(data, $event) { let parentChecked = data.checked; for (const obj of this.hierarchicalData) { for (const childObj of obj.children) { childObj.checked = parentChecked; } } }
stackoverflow
{ "language": "en", "length": 211, "provenance": "stackexchange_0000F.jsonl.gz:852028", "question_score": "28", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502267" }
192d37142a0f8fe1a30f52d795cc38ce785e6f84
Q: skype: how to read chat message using main.db I have tried upto a level but after that I am stuck. Let share steps I followed: * *I can see main.db file at this location C:\Users\Admin\AppData\Roaming\Skype\paul.lowry198. *To open this main.db file I have installed SQL Lite BB Browser application. *This application can show all existing tables in DB. There is one table chats. On this table I am firing a query select * from Chats where friendlyname = 'Jimmy Trevor'; It resulted 69 results. *Now after this how to read the message that he wrote but deleted(or any message) before I could read it. What could be the correct query that can show me the message? A: Note that as of April 2017 and the new Skype for Windows 10 (Skype UWP - Universal Windows Platform), the main.db file has moved to %localappdata%\Packages\Microsoft.SkypeApp_kzf8qxf38zg5c\LocalState\<SkypeUsername>\main.db with %localappdata% being C:\Users\<username>\AppData\Local A: I've done this 4 years ago and today I need that again. So after opening main.db file using SQLite Browser you need to: Determine id of conversation (one person could be in multiple conversations): select conv_dbid, * from Chats where friendlyname like '%Friendly Name%'; OR you can find desired conversation id using select * from Conversations where identity like '%accountname%'; Finally, SQL Query to get all messages from desired conversation: select body_xml, datetime(timestamp, 'unixepoch'), edited_by, edited_timestamp from Messages where convo_id=YOUR_CONVERSATION_ID; Unfortunately I discovered that main.db fields body_xml, edited_by and edited_timestamp changed every time person editing/deleting message and there is no backup of body_xml in main.db. But don't worry! There is a folder chatsync near main.db database (in your skype account folder /AppData/Roaming/Skype/Account_Name_Folder). All messages in chatsync is in Skype binary format, to read them you can use lightweight free utility http://www.nirsoft.net/utils/skype_log_view.html Here is the start dialog of SkypeLogView, it automatically selects your skype directory (better close your skype application, but it is not necessary). Choose dates to faster up search process. Voila! A: Browse and export your Skype history online I recently used the following platform : http://www.skypebrowser.com/ (UPDATE the domain is down) Steps * *Click to upload your Skype database (Max 30 MB). find main.db from local disk Find the main.db file: C:\Users<WindowsUserName>\AppData\Roaming\Skype<SkypeUserName>\main.db wait a couple minutes to upload the file: Uploaded main.db file *After open file, you can export as html zip file to your local disk, with export button. Export as zipped html file *Save as the file. Save as the zip file Inside zip file, exist all conversations with people or groups in different html files.
stackoverflow
{ "language": "en", "length": 418, "provenance": "stackexchange_0000F.jsonl.gz:852034", "question_score": "10", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502283" }
dd2c0964fa392c59495da71f5f811ed71c17f39f
Q: app that simulates bluetooth device (printer) Is it possible to create an Android application that can simulate a Bluetooth device? I want to make Android OS think that it is connected to a Bluetooth device which is actually simulated by my application. Scenario: the user will install my app, and voilà, there is my virtual Bluetooth device - actually, it is a printer. I need it to work on non-rooted devices. Reason: some guys are too lazy to support other Bluetooth printers, so I want to simulate the supported Bluetooth printer and print the data on another Bluetooth printer. FYI: I didn't find anything saying that it is not possible. I know that the emulator cannot simulate it, but ShadowBluetoothAdapter from Robolectric can.
stackoverflow
{ "language": "en", "length": 119, "provenance": "stackexchange_0000F.jsonl.gz:852042", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502302" }
2c3d6b315dd831e4dc78af9ab310fa38045d64e6
Q: Pandas dataframe to_csv - split into multiple output files What is the best /easiest way to split a very large data frame (50GB) into multiple outputs (horizontally)? I thought about doing something like: stepsize = int(1e8) for id, i in enumerate(range(0,df.size,stepsize)): start = i end = i + stepsize-1 #neglect last row ... df.ix[start:end].to_csv('/data/bs_'+str(id)+'.csv.out') But I bet there is a smarter solution out there? As noted by jakevdp, HDF5 is a better way to store huge amounts of numerical data, however it doesn't meet my business requirements. A: This answer brought me to a satisfying solution using: * *numpy.array_split(object, number_of_chunks) for idx, chunk in enumerate(np.array_split(df, number_of_chunks)): chunk.to_csv(f'/data/bs_{idx}.csv') A: Use id in the filename else it will not work. You missed id, and without id, it gives an error. for id, df_i in enumerate(np.array_split(df, number_of_chunks)): df_i.to_csv('/data/bs_{id}.csv'.format(id=id))
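As a further illustration (not part of the original answers), the same split can be expressed with a groupby over a computed chunk id, which fixes the chunk size rather than the number of chunks; chunk_size and the output path pattern below are illustrative:

import numpy as np
import pandas as pd

chunk_size = 1_000_000  # rows per output file (illustrative)
# group rows by an integer chunk id: 0 for the first chunk_size rows, 1 for the next, and so on
for chunk_id, chunk in df.groupby(np.arange(len(df)) // chunk_size):
    chunk.to_csv(f'/data/bs_{chunk_id}.csv', index=False)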
stackoverflow
{ "language": "en", "length": 136, "provenance": "stackexchange_0000F.jsonl.gz:852043", "question_score": "18", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502306" }
114313f3f75f2783498131c1e0c69a594abb3be3
Q: How to clear all WebView stored information? I have an Android browser and I have the option to clear cache, storage, cookies, etc. The code looks like this: webView.clearCache(true); webView.clearFormData(); webView.clearHistory(); webView.clearSslPreferences(); CookieManager.getInstance().removeAllCookies(null); CookieManager.getInstance().flush(); And this seems to work in all my tests, but when I go to google.com my old searches are still there. What am I not clearing? Thanks. A: Found the solution: WebStorage.getInstance().deleteAllData(); A: I have a device with root access and found that calling WebStorage.getInstance().deleteAllData(); and similar calls doesn't clear the cache created by the WebView at applicationDataDir/app_webview. Also, that code sometimes causes fatal errors like A/libc: Send stop signal to pid:16145 in void debuggerd_signal_handler(int, siginfo_t*, void*) And the cache is not so small in size. To delete it you can use the following code snippet: public static void clearWebViewCachesCustom(Context context) { try { String dataDir = context.getPackageManager().getPackageInfo(context.getPackageName(), 0).applicationInfo.dataDir; new File(dataDir + "/app_webview/").delete(); } catch (Exception e) { if (!MainActivity.deBugTest) Crashlytics.logException(e); e.printStackTrace(); e.getSuppressed(); } }
stackoverflow
{ "language": "en", "length": 161, "provenance": "stackexchange_0000F.jsonl.gz:852088", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502450" }
b48ed139058b23c683476f85160dc414efc19899
Q: bx cs locations not displaying location based on region I login into Bluemix and the origin region is us-south. I issue the command bx cs locations and it returns dal10 and dal12. I change my region using command bx target -r eu-de and check to see if the region is changed with bx info. Of course, the region is now eu-de as expected. I issue the command bx cs locations to find the locations within this region, but the locations returned are dal10 and dal12, which are in region us-south and not a part of region eu-de. How do I list the Container Services locations based on region? A: There are two different regions types to consider. One is the overall Bluemix region. The other is the IBM Bluemix Container Service region. Kubernetes is available in two different IBM Bluemix Container Service regions, us-south and eu-central. To access these regions, use the optional --host flag when you initialize the container service. For eu-central, that would be bx cs init --host https://eu-central.containers.bluemix.net You can see the log in path in the docs: https://console.ng.bluemix.net/docs/containers/cs_cli_install.html#cs_cli_configure A: To expand on the above answer, you can also do another bx cs init after switching the bx region, and bx cs init will use the default host for the bx region you are in. So if you don't want to learn the hosts needed, you can do it by changing bx regions, just remember to do bx cs init after.
stackoverflow
{ "language": "en", "length": 245, "provenance": "stackexchange_0000F.jsonl.gz:852094", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502460" }
77dcae4c041b11b227bbdb09424fdf9b8185434b
Q: How to create and fill a list of lists in a for loop I'm trying to populate a list with a for loop. This is what I have so far: newlist = [] for x in range(10): for y in range(10): newlist.append(y) and at this point I am stumped. I was hoping the loops would give me a list of 10 lists. A: You can use this one-line list comprehension to achieve the same result: new_list = [[i for i in range(10)] for j in range(10)] A: Alternatively, you only need one loop and can append range(10). newlist = [] for x in range(10): newlist.append(list(range(10))) Or newlist = [list(range(10)) for _ in range(10)] A: Or just a nested list comprehension: [[x for x in range(10)] for _ in range(10)] A: You were close to it. But you need to append the new elements in the inner loop to an empty list, which will then be appended as an element of the outer list. Otherwise you will get (as you can see from your code) a flat list of 100 elements. newlist = [] for x in range(10): innerlist = [] for y in range(10): innerlist.append(y) newlist.append(innerlist) print(newlist) See the comment below by Błotosmętek for a more concise version of it. A: You should use an intermediate list to get another level: newlist = [] for x in range(10): temp_list = [] for y in range(10): temp_list.append(y) newlist.append(temp_list) A: newlist = [list(range(10))] * 10 (note that this repeats ten references to the same inner list, so modifying one inner list modifies all of them)
stackoverflow
{ "language": "en", "length": 242, "provenance": "stackexchange_0000F.jsonl.gz:852104", "question_score": "11", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502482" }
b594c19965543c869ad57f6e4203f3ff2829d096
Q: How to wait a ResultCallback from DockerClient.waitContainerCmd()? In our J2EE project, we're doing some video conversion by calling sjourdan/ffmpeg docker from our java code, relying on docker-java. For now, it looks something like this (quite simplified for brevity): CreateContainerCmd createCommand = dockerClient.createContainerCmd("sjourdan/ffmpeg") .withVolumes(aVolume) .withBinds(aBind) .withCmd("a lot of options about the conversion itself, codec, ratio, …"); CreateContainerResponse container = createCommand.exec(); dockerClient.startContainerCmd(container.getId()).exec(); // vanilla implementation of ResultCallback MyResultCallback callback = new MyResultCallback(); dockerClient.waitContainerCmd(container.getId()).exec(callback); Fact is, of course, the thread keeps going after that without waiting the callback to be called back. How should we do to force the thread to wait until the callback is called, informing us the docker command is fully ended? On another note, if someone knows how to add a --rm parameter to the played docker run command using docker-java, I'm interested. A: Well, if anyone needs a solution, here what we implemented at the end: CountDownLatch latch = new CountDownLatch(1); MyResultCallback callback = new MyResultCallback(latch); dockerClient.waitContainerCmd(container.getId()).exec(callback); return latch.await(5, TimeUnits.MINUTES); and the MyResultCallback.onComplete looks like public void onComplete() { latch.countDown(); } Seems a nice enough way to handle the waiting from the main thread. A: There is a built-in solution today: var callback = new WaitContainerResultCallback(); docker.waitContainerCmd(container.getId()).exec(callback); callback.awaitStarted(); The thread will be blocked until the contain is started.
stackoverflow
{ "language": "en", "length": 211, "provenance": "stackexchange_0000F.jsonl.gz:852113", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502516" }
0e1a8a3cd9fbb8371e250e08577cc0ddf58a1cec
Stackoverflow Stackexchange Q: httpbuilder-ng multipart/form-data with large application/zip part When trying to upload a large zip via multipart/form-data with httpbuilder-ng apache implementation version 0.16.1 i get org.apache.http.ContentTooLongException: Content length is too long: 109370 at org.apache.http.entity.mime.MultipartFormEntity.getContent(MultipartFormEntity.java:103) at groovyx.net.http.ApacheEncoders.multipart(ApacheEncoders.java:74) and that is Ok because the zip is rather large and there's no reason to wiggle it around buffers and ByteArray[Input|Output]Stream, question is how do i actually send the multipart to the connection's output stream? i tried to customize the encoder but the ToServer only exposes one method that accepts an InputStream which doesn't really work for me Here is a snippet of what i was doing configure { request.uri = 'https://anypoint.mulesoft.com' request.contentType = JSON[0] request.encoder(MULTIPART_FORMDATA[0], ApacheEncoders.&multipart) } .post { request.uri.path = '/cloudhub/api/v2/applications' request.headers['X-ANYPNT-ENV-ID'] = eid request.contentType = MULTIPART_FORMDATA[0] request.body = multipart { part('appInfoJson', JSON[0], '{"domain":"myDomain"}') part('autoStart', 'true') part( 'file', 'myLargeZip.zip', BINARY[0], // or 'application/zip' new File('/parent', 'myLargeZip.zip') ) } }
Q: httpbuilder-ng multipart/form-data with large application/zip part When trying to upload a large zip via multipart/form-data with httpbuilder-ng apache implementation version 0.16.1 i get org.apache.http.ContentTooLongException: Content length is too long: 109370 at org.apache.http.entity.mime.MultipartFormEntity.getContent(MultipartFormEntity.java:103) at groovyx.net.http.ApacheEncoders.multipart(ApacheEncoders.java:74) and that is Ok because the zip is rather large and there's no reason to wiggle it around buffers and ByteArray[Input|Output]Stream, question is how do i actually send the multipart to the connection's output stream? i tried to customize the encoder but the ToServer only exposes one method that accepts an InputStream which doesn't really work for me Here is a snippet of what i was doing configure { request.uri = 'https://anypoint.mulesoft.com' request.contentType = JSON[0] request.encoder(MULTIPART_FORMDATA[0], ApacheEncoders.&multipart) } .post { request.uri.path = '/cloudhub/api/v2/applications' request.headers['X-ANYPNT-ENV-ID'] = eid request.contentType = MULTIPART_FORMDATA[0] request.body = multipart { part('appInfoJson', JSON[0], '{"domain":"myDomain"}') part('autoStart', 'true') part( 'file', 'myLargeZip.zip', BINARY[0], // or 'application/zip' new File('/parent', 'myLargeZip.zip') ) } }
stackoverflow
{ "language": "en", "length": 146, "provenance": "stackexchange_0000F.jsonl.gz:852137", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502619" }
69ea6c957bed2f6b230bda5de3b5eff7c1fb352c
Stackoverflow Stackexchange Q: Nodejs 6.10.2 crypto AES Invalid key length I am trying to use crypto to encrypt a file. Here is my code: const crypto = require('crypto'); const fs = require('fs'); const input = fs.createReadStream('test.jpg'); const output = fs.createWriteStream('test.enc'); const sharedSecret = crypto.randomBytes(256); const initializationVector = crypto.randomBytes(16); const cipher = crypto.createCipheriv('aes-256-cbc', sharedSecret, initializationVector); input.pipe(cipher).pipe(output); I got the error: crypto.js:191 this._handle.initiv(cipher, toBuf(key), toBuf(iv)); ^ Error: Invalid key length at Error (native) at new Cipheriv (crypto.js:191:16) at Object.Cipheriv (crypto.js:189:12) at Object.<anonymous> (/Users/lijinyao/Projects/HyperAlbum/Encryption/encrypt.js:10:23) at Module._compile (module.js:570:32) at Object.Module._extensions..js (module.js:579:10) at Module.load (module.js:487:32) at tryModuleLoad (module.js:446:12) at Function.Module._load (module.js:438:3) at Module.runMain (module.js:604:10) I thought the sharedSecret length should be the same as the AES key length, but it's not. What length should I use? Thanks :) A: You have bytes confused with bits. aes-256 means 256 bits = 32 bytes. Try this: const crypto = require('crypto'); const fs = require('fs'); const input = fs.createReadStream('test.jpg'); const output = fs.createWriteStream('test.enc'); const sharedSecret = crypto.randomBytes(32); const initializationVector = crypto.randomBytes(16); const cipher = crypto.createCipheriv('aes-256-cbc', sharedSecret, initializationVector); input.pipe(cipher).pipe(output); If you can't see the difference, the change is: const sharedSecret = crypto.randomBytes(32);
Q: Nodejs 6.10.2 crypto AES Invalid key length I am trying to use crypto to encrypt a file. Here is my code: const crypto = require('crypto'); const fs = require('fs'); const input = fs.createReadStream('test.jpg'); const output = fs.createWriteStream('test.enc'); const sharedSecret = crypto.randomBytes(256); const initializationVector = crypto.randomBytes(16); const cipher = crypto.createCipheriv('aes-256-cbc', sharedSecret, initializationVector); input.pipe(cipher).pipe(output); I got the error: crypto.js:191 this._handle.initiv(cipher, toBuf(key), toBuf(iv)); ^ Error: Invalid key length at Error (native) at new Cipheriv (crypto.js:191:16) at Object.Cipheriv (crypto.js:189:12) at Object.<anonymous> (/Users/lijinyao/Projects/HyperAlbum/Encryption/encrypt.js:10:23) at Module._compile (module.js:570:32) at Object.Module._extensions..js (module.js:579:10) at Module.load (module.js:487:32) at tryModuleLoad (module.js:446:12) at Function.Module._load (module.js:438:3) at Module.runMain (module.js:604:10) I thought the sharedSecret length should be the same as the AES key length, but it's not. What length should I use? Thanks :) A: You have bytes confused with bits. aes-256 means 256 bits = 32 bytes. Try this: const crypto = require('crypto'); const fs = require('fs'); const input = fs.createReadStream('test.jpg'); const output = fs.createWriteStream('test.enc'); const sharedSecret = crypto.randomBytes(32); const initializationVector = crypto.randomBytes(16); const cipher = crypto.createCipheriv('aes-256-cbc', sharedSecret, initializationVector); input.pipe(cipher).pipe(output); If you can't see the difference, the change is: const sharedSecret = crypto.randomBytes(32);
stackoverflow
{ "language": "en", "length": 176, "provenance": "stackexchange_0000F.jsonl.gz:852142", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502637" }
e5c364996dd2325c0a70ac3fe30fbe622c036ee6
Stackoverflow Stackexchange Q: Office 365 manifest size limit While trying to use the Office 365 admin portal to upload an add-in for our organization we encountered the following problem: On the final step of the dialog, an error message appears claiming the following: With devtools open I can see that the failing request contains the following error message: "FailedWriteToExchange". This is strange, since uploading the Contoso Citations add-in manifest works just fine. The manifest works properly if it is sideloaded in Word Desktop (Office 365), and all XSD checks pass. With a little bit of trial and error it seems that there is a limit to how large the manifest files can be. I tested different versions of the Contoso manifest by padding them with gibberish resource strings until the upload failed. When the manifest size exceeded about 36400 bytes, the upload failed. Does anyone know about this size limit? Is there a way to work around it? Thanks in advance.
Q: Office 365 manifest size limit While trying to use the Office 365 admin portal to upload an add-in for our organization we encountered the following problem: On the final step of the dialog, an error message appears claiming the following: With devtools open I can see that the failing request contains the following error message: "FailedWriteToExchange". This is strange, since uploading the Contoso Citations add-in manifest works just fine. The manifest works properly if it is sideloaded in Word Desktop (Office 365), and all XSD checks pass. With a little bit of trial and error it seems that there is a limit to how large the manifest files can be. I tested different versions of the Contoso manifest by padding them with gibberish resource strings until the upload failed. When the manifest size exceeded about 36400 bytes, the upload failed. Does anyone know about this size limit? Is there a way to work around it? Thanks in advance.
stackoverflow
{ "language": "en", "length": 156, "provenance": "stackexchange_0000F.jsonl.gz:852151", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502660" }
40fc95cfe8908d933ab05226bea10ce859f716ad
Stackoverflow Stackexchange Q: Run Scaffold-DbContext on Visual Studio for Mac I have a site that was built using database first and I'm trying to continue development of it on a mac. Normally I would run the Scaffold-dbContext using the Console Package Manager in Visual Studio. The mac version doesn't have this I tried running it in Terminal, but that obviously didn't work. Is it possible to run this command, or do I need to continue development on Windows?  A: You can run the command from the terminal after completing a few required steps as found here: * *You need to add the following manually to your *.csproj <ItemGroup> <DotNetCliToolReference Include="Microsoft.EntityFrameworkCore.Tools.DotNet" Version="2.0.0" /> </ItemGroup> *Execute dotnet add package Microsoft.EntityFrameworkCore.Design 3.Execute dotnet restore You should now be able to scaffold using the command: dotnet ef dbcontext scaffold --help
Q: Run Scaffold-DbContext on Visual Studio for Mac I have a site that was built using database first and I'm trying to continue development of it on a mac. Normally I would run the Scaffold-dbContext using the Console Package Manager in Visual Studio. The mac version doesn't have this I tried running it in Terminal, but that obviously didn't work. Is it possible to run this command, or do I need to continue development on Windows?  A: You can run the command from the terminal after completing a few required steps as found here: * *You need to add the following manually to your *.csproj <ItemGroup> <DotNetCliToolReference Include="Microsoft.EntityFrameworkCore.Tools.DotNet" Version="2.0.0" /> </ItemGroup> *Execute dotnet add package Microsoft.EntityFrameworkCore.Design 3.Execute dotnet restore You should now be able to scaffold using the command: dotnet ef dbcontext scaffold --help A: Here is the code work for me on visual studio mac Install the below package using visual studio mac edit references on project or add package to .csproj file. Microsoft.EntityFrameworkCore.SqlServer Microsoft.EntityFrameworkCore.Tools Microsoft.VisualStudio.Web.CodeGeneration.Design or using the Terminal navigate to the project and use the below command - dotnet add package Microsoft.EntityFrameworkCore.SqlServer dotnet add package Microsoft.EntityFrameworkCore.Tools dotnet add package Microsoft.VisualStudio.Web.CodeGeneration.Design Now check the tools and EF are installed or not.Navigate to the project install location and use mac terminal with below command. It should show entity framework details dotnet ef Now Scaffold the DB context dotnet ef dbcontext Scaffold "Server=<servername>,1433;Initial Catalog=<dbName>;Persist Security Info=False;User ID=<userID>;Password=<password>;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;"Microsoft.EntityFrameworkCore.SqlServer -o <directory name> References https://learn.microsoft.com/en-us/ef/core/miscellaneous/cli/dotnet https://learn.microsoft.com/en-us/ef/core/get-started/aspnetcore/existing-db https://www.learnentityframeworkcore.com/walkthroughs/existing-database A: I just wanted to post my solution after I struggled for a while. Had to separate the schema string into multiple --schema options. dotnet ef dbcontext scaffold "Server=<servername>,1433;Initial Catalog=<dbName>;Persist Security Info=False;User ID=<userID>;Password=<password>;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;" Microsoft.EntityFrameworkCore.SqlServer --context [context] -f --output-dir [dir] --schema [schema1] --schema [schema2] A: If you are using .NET Core 3.0.0 even after installing EntityFrameworkCore you will need to run dotnet add package Microsoft.EntityFrameworkCore.SqlServer
stackoverflow
{ "language": "en", "length": 310, "provenance": "stackexchange_0000F.jsonl.gz:852162", "question_score": "12", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502698" }
a7c318f665c22f01f00e47299e17baa00e9245be
Stackoverflow Stackexchange Q: how to find my angular version in my project? I have setup the angular code on my local machine. I need to know the version of the angular that I am using in the project. how can I easily find it in cmd prompt? A: There are several ways you can do that: * *Go into node_modules/@angular/core/package.json and check version field. *If you need to use it in your code, you can import it from the @angular/core: import { VERSION } from '@angular/core'; *Inspect the rendered DOM - Angular adds the version to the main component element: <my-app ng-version="4.1.3">
Q: how to find my angular version in my project? I have setup the angular code on my local machine. I need to know the version of the angular that I am using in the project. how can I easily find it in cmd prompt? A: There are several ways you can do that: * *Go into node_modules/@angular/core/package.json and check version field. *If you need to use it in your code, you can import it from the @angular/core: import { VERSION } from '@angular/core'; *Inspect the rendered DOM - Angular adds the version to the main component element: <my-app ng-version="4.1.3"> A: You can also find dependencies version details in package.json file as following: A: try this command : ng --version It prints out Angular, Angular CLI, Node, Typescript versions etc. A: If you try to check angular version in the browser, for me only this worked Ctrl+Shift+i and paste below command in console: document.querySelector('[ng-version]').getAttribute('ng-version') ex: A: * *Browser > Inspect > Element > <.app-root _nghost-hey-c0="" ng-version="8.2.11"> *In terminal :> ng version :> ng --version :> ng -v A: For Angular 1 or 2 (but not for Angular 4+): You can also open the console and go to the element tab on the developer tools of whatever browser you use. Or Type angular.version to access the Javascript object that holds angular version. For Angular 4+ There is are the number of ways as listed below : Write below code in the command prompt/or in the terminal in the VS Code. * *ng version or ng --version (See the attachment for the reference.) *ng v *ng -v In the terminal you can find the angular version as shown in the attached image : *You can also open the console and go to the element tab on the developer tools of whatever browser you use. As displayed in the below image : 5.Find the package.json file, You will find all the installed packages and their version. *declare the variable named as 'VERSION', Import the dependencies. import { VERSION } from '@angular/core'; // To display the version in the console. console.log(VERSION.full); A: The best way to check which version of angular you're using is to launch your browser, right-click /inspect and look in the element tab. I found that to be most helpful. A: app.component import { Component, VERSION, } from '@angular/core'; name = 'Angular version' + VERSION.major; app.component.html <h5>{{name}}</h5> A: define VERSION variable and import version into it. import { VERSION } from '@angular/core'; Now you can use VERSION variable in your code to print version For example, console.log(VERSION.full); A: For Angular 2+ you can run this in the console: document.querySelector('[ng-version]').getAttribute('ng-version') For AngularJS 1.x: angular.version.full A: ng --version command will show only the installed angular version in your computer instead of the actual project version. if you really want to know the project version, Go to your project, use the below command npm list -local Another way, check package.json to know the angular version A: you can use ng --version for angular version 7
stackoverflow
{ "language": "en", "length": 501, "provenance": "stackexchange_0000F.jsonl.gz:852187", "question_score": "127", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502779" }
8f11dc37a5f15c1a75e9b07f70672c4783fc854a
Stackoverflow Stackexchange Q: Use λ,φ,α,... in the jupyter notebook, just as in julia As some of you probably know, you can use λ,φ,α,.., in any julia script. Couldnt this also be possible for python? I would use julia, but there are still some packages from python which I would have to wrap. Best wishes A: Python 3 supports λ,φ,α and many other Unicode characters in identifiers (as mentioned by @jwodder). In jupyter notebook, you can access these characters by typing \<character name><tab> Example \alpha<tab> = 1 # α = 1 Not all Unicode characters can be used as variable names, e.g. emojis: >>> ♥ = "love" File "<ipython-input-29-97d253080b57>", line 1 ♥ = "love" ^ SyntaxError: invalid character in identifier However, letter-like characters are allowed, particularly in foreign languages: >>> αγαπώ = "love" >>> люблю = "love" >>> 愛 = "love" See also David Beazley's talk Mastering Python 3 I/O for more on practical uses of Unicode. * *REF 001: Unicode variables names *REF 002: PEP 3131 - Supporting Non-ASCII Identifiers *REF 003: Unicode HOWTO
Q: Use λ,φ,α,... in the jupyter notebook, just as in julia As some of you probably know, you can use λ,φ,α,.., in any julia script. Couldnt this also be possible for python? I would use julia, but there are still some packages from python which I would have to wrap. Best wishes A: Python 3 supports λ,φ,α and many other Unicode characters in identifiers (as mentioned by @jwodder). In jupyter notebook, you can access these characters by typing \<character name><tab> Example \alpha<tab> = 1 # α = 1 Not all Unicode characters can be used as variable names, e.g. emojis: >>> ♥ = "love" File "<ipython-input-29-97d253080b57>", line 1 ♥ = "love" ^ SyntaxError: invalid character in identifier However, letter-like characters are allowed, particularly in foreign languages: >>> αγαπώ = "love" >>> люблю = "love" >>> 愛 = "love" See also David Beazley's talk Mastering Python 3 I/O for more on practical uses of Unicode. * *REF 001: Unicode variables names *REF 002: PEP 3131 - Supporting Non-ASCII Identifiers *REF 003: Unicode HOWTO
stackoverflow
{ "language": "en", "length": 172, "provenance": "stackexchange_0000F.jsonl.gz:852198", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502828" }
e6e4455a17fb466eac9958de743eb5832836a81e
Stackoverflow Stackexchange Q: Why is zlib.crc32 faster than binascii.crc32? In the following benchmark, I find that the function zlib.crc32 is about 2.5 times faster binascii.crc32. Why is that, and are there any downsides to using the zlib module's implementation? #!/usr/bin/python3 import timeit print("b:", timeit.timeit("binascii.crc32(data)", setup="import binascii, zlib; data=b'X'*4096", number=100000)) print("z:", timeit.timeit("zlib.crc32(data)", setup="import binascii, zlib; data=b'X'*4096", number=100000)) Result: b: 1.0176826480001182 z: 0.4006126120002591 A: I found this discussion: https://mail.python.org/pipermail/python-3000/2008-March/012728.html where Gregory P. Smith (in a discussion with Guido) wrote: Removal from binascii would break things for platforms or embedded systems wanting crc32 that don't want to include zlib. Anyone care? TL;DR: The binascii implementation is for systems that don't have zlib (or don't want to include it), so it's considered sub-optimal, but would break things if removed.
Q: Why is zlib.crc32 faster than binascii.crc32? In the following benchmark, I find that the function zlib.crc32 is about 2.5 times faster binascii.crc32. Why is that, and are there any downsides to using the zlib module's implementation? #!/usr/bin/python3 import timeit print("b:", timeit.timeit("binascii.crc32(data)", setup="import binascii, zlib; data=b'X'*4096", number=100000)) print("z:", timeit.timeit("zlib.crc32(data)", setup="import binascii, zlib; data=b'X'*4096", number=100000)) Result: b: 1.0176826480001182 z: 0.4006126120002591 A: I found this discussion: https://mail.python.org/pipermail/python-3000/2008-March/012728.html where Gregory P. Smith (in a discussion with Guido) wrote: Removal from binascii would break things for platforms or embedded systems wanting crc32 that don't want to include zlib. Anyone care? TL;DR: The binascii implementation is for systems that don't have zlib (or don't want to include it), so it's considered sub-optimal, but would break things if removed.
stackoverflow
{ "language": "en", "length": 123, "provenance": "stackexchange_0000F.jsonl.gz:852203", "question_score": "8", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502855" }
3f556974392710b882604cb6069bafc0c8d69cdb
Stackoverflow Stackexchange Q: How can I get the current SparkSession in any place of the codes? I have created a session in the main() function, like this: val sparkSession = SparkSession.builder.master("local[*]").appName("Simple Application").getOrCreate() Now if I want to configure the application or access the properties, I can use the local variable sparkSession in the same function. What if I want to access this sparkSession elsewhere in the same project, like project/module/.../.../xxx.scala. What should I do? A: Since 2.2.0 you can access the active SparkSession through: /** * Returns the active SparkSession for the current thread, returned by the builder. * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = Option(activeThreadSession.get) or default SparkSession: /** * Returns the default SparkSession that is returned by the builder. * * @since 2.2.0 */ def getDefaultSparkSession: Option[SparkSession] = Option(defaultSession.get)
Q: How can I get the current SparkSession in any place of the codes? I have created a session in the main() function, like this: val sparkSession = SparkSession.builder.master("local[*]").appName("Simple Application").getOrCreate() Now if I want to configure the application or access the properties, I can use the local variable sparkSession in the same function. What if I want to access this sparkSession elsewhere in the same project, like project/module/.../.../xxx.scala. What should I do? A: Since 2.2.0 you can access the active SparkSession through: /** * Returns the active SparkSession for the current thread, returned by the builder. * * @since 2.2.0 */ def getActiveSession: Option[SparkSession] = Option(activeThreadSession.get) or default SparkSession: /** * Returns the default SparkSession that is returned by the builder. * * @since 2.2.0 */ def getDefaultSparkSession: Option[SparkSession] = Option(defaultSession.get) A: When SparkSession variable has been defined as val sparkSession = SparkSession.builder.master("local[*]").appName("Simple Application").getOrCreate() This variable is going to point/refer to only one SparkSession as its a val. And you can always pass to different classes for them to access as well as val newClassCall = new NewClass(sparkSession) Now you can use the same sparkSession in that new class as well. A: Once a session was created (anywhere), you can safely use: SparkSession.builder.getOrCreate() To get the (same) session anywhere in the code, as long as the session is still alive. Spark maintains a single active session so unless it was stopped or crashed, you'll get the same one. Edit: builder is not callable, as mentioned in the comments. A: This is a old question and there are couple of answer that are good enough but I would like to give one more approach that can be used to make it work. You can create a trait that extends from serializable and create spark session as a lazy variable and then through out your project in all the objects that you create, you can extend that trait and it will give you sparksession instance. Code as below: import org.apache.spark.sql.SparkSession import org.apache.spark.sql.DataFrame trait SparkSessionWrapper extends Serializable { lazy val spark: SparkSession = { SparkSession.builder().appName("TestApp").getOrCreate() } //object with the main method and it extends SparkSessionWrapper object App extends SparkSessionWrapper { def main(args: Array[String]): Unit = { val readdf = ReadFileProcessor.ReadFile("testpath") readdf.createOrReplaceTempView("TestTable") val viewdf = spark.sql("Select * from TestTable") } } object ReadFileProcessor extends SparkSessionWrapper{ def ReadFile(path: String) : DataFrame = { val df = spark.read.format("csv").load(path) df } } As you are extending the SparkSessionWrapper on both the Objects that you created, spark session would get initialized when first time spark variable is encountered in the code and then you refer it on any object that extends that trait without passing that as a parameter to the method. It works or give you a experience that is similar to notebook. 
Update : If you even want it to be more generic and have an need to even set the custom appname based on the type of workflow you are running you can do it as below : import org.apache.spark.sql.SparkSession import org.apache.spark.sql.DataFrame trait SparkSessionWrapper extends Serializable { lazy val spark: SparkSession = { createSparkSession(appname) } def appname : String def createSparkSession(appname : String) : SparkSession ={ SparkSession.builder().appName(appname).master("local[*]").getOrCreate() } //object with the main method and it extends SparkSessionWrapper object App extends SparkSessionWrapper { def main(args: Array[String]): Unit = { val readdf = ReadFileProcessor.ReadFile("testpath") readdf.createOrReplaceTempView("TestTable") val viewdf = spark.sql("Select * from TestTable") } override def appname: String = "ReadFile" } object ReadFileProcessor extends SparkSessionWrapper{ def ReadFile(path: String) : DataFrame = { val df = spark.read.format("csv").load(path) df } override def appname: String = "ReadcsvFile" } the only main difference is that you need to create an abstract function inside the trait and then you would have to override that into any of the startup class that you are using to provide the value.
stackoverflow
{ "language": "en", "length": 625, "provenance": "stackexchange_0000F.jsonl.gz:852207", "question_score": "21", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502872" }
5de52c53adb271e9403f388a32ee78b2c51c2c1f
Stackoverflow Stackexchange Q: dockerize a wpf application and use it I'm trying to dockerize a WPF application. Just a simple app for the time being. For now I have this Dockerfile: FROM microsoft/nanoserver WORKDIR C run "$PATH" #ENTRYPOINT ["C:\Users\TestDev\Documents\Visual Studio 2017\Projects\TestExe\TestExe\bin\Release\TestExe.exe"] RUN ["C:\Users\TestDev\Documents\Visual Studio 2017\Projects\TestExe\TestExe\bin\Release\TestExe.exe"] So I tried with ENTRYPOINT, RUN and CMD, but got this error: The filename, directory name, or volume label syntax is incorrect. And I would like to know how it works after running a container. Thanks. A: You cannot run a WPF application in docker. Here is a snippet of the Microsoft docs: Docker is for server applications—Web sites, APIs, messaging solutions and other components that run in the background. You can’t run desktop apps in Docker because there’s no UI integration between the Docker platform and the Windows host. That rules out running Windows Forms or Windows Presentation Foundation (WPF) apps in containers (although you could use Docker to package and distribute those desktop apps), but Windows Communication Foundation (WCF), .NET console apps and all flavors of ASP.NET are great candidates. Check out the source
Q: dockerize a wpf application and use it I'm trying to dockerize a WPF application. Just a simple app for the time being. For now I have this Dockerfile: FROM microsoft/nanoserver WORKDIR C run "$PATH" #ENTRYPOINT ["C:\Users\TestDev\Documents\Visual Studio 2017\Projects\TestExe\TestExe\bin\Release\TestExe.exe"] RUN ["C:\Users\TestDev\Documents\Visual Studio 2017\Projects\TestExe\TestExe\bin\Release\TestExe.exe"] So I tried with ENTRYPOINT, RUN and CMD, but got this error: The filename, directory name, or volume label syntax is incorrect. And I would like to know how it works after running a container. Thanks. A: You cannot run a WPF application in docker. Here is a snippet of the Microsoft docs: Docker is for server applications—Web sites, APIs, messaging solutions and other components that run in the background. You can’t run desktop apps in Docker because there’s no UI integration between the Docker platform and the Windows host. That rules out running Windows Forms or Windows Presentation Foundation (WPF) apps in containers (although you could use Docker to package and distribute those desktop apps), but Windows Communication Foundation (WCF), .NET console apps and all flavors of ASP.NET are great candidates. Check out the source
stackoverflow
{ "language": "en", "length": 179, "provenance": "stackexchange_0000F.jsonl.gz:852210", "question_score": "9", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502886" }
b1216efc1d04d5e5b6e34d55e57331899d9190ee
Stackoverflow Stackexchange Q: Defining a default constructor and a secondary constructor in Kotlin, with properties I'm trying to make a simple POJO (POKO?) class in Kotlin, with a default empty constructor and a secondary constructor with parameters, that feeds properties This doesn't give me firstName and lastName properties: class Person() { constructor(firstName: String?, lastName: String?) : this() } This gives me the properties, but they're not set after instantiation: class Person() { constructor(firstName: String?, lastName: String?) : this() var firstName: String? = null var lastName: String? = null } And this gives me a compile error saying "'var' on secondary constructor parameter is not allowed.": class Person() { constructor(var firstName: String?, var lastName: String?) : this() } So, how is this done? How can I have a default constructor and a secondary constructor with parameters and properties? A: You can have just a primary constructor with parameters that have default values: class Person(var firstName: String? = null, var lastName: String? = null)
Q: Defining a default constructor and a secondary constructor in Kotlin, with properties I'm trying to make a simple POJO (POKO?) class in Kotlin, with a default empty constructor and a secondary constructor with parameters, that feeds properties This doesn't give me firstName and lastName properties: class Person() { constructor(firstName: String?, lastName: String?) : this() } This gives me the properties, but they're not set after instantiation: class Person() { constructor(firstName: String?, lastName: String?) : this() var firstName: String? = null var lastName: String? = null } And this gives me a compile error saying "'var' on secondary constructor parameter is not allowed.": class Person() { constructor(var firstName: String?, var lastName: String?) : this() } So, how is this done? How can I have a default constructor and a secondary constructor with parameters and properties? A: You can have just a primary constructor with parameters that have default values: class Person(var firstName: String? = null, var lastName: String? = null) A: There are 2 ways to do this. Both require val and var in primary. Default Parameters In Primary class Person(var firstName: String? = null, var lastName: String? = null) Secondary Constructor Calling Primary class Person(var firstName: String?, var lastName: String?) { constructor() : this(null, null) { } }
stackoverflow
{ "language": "en", "length": 209, "provenance": "stackexchange_0000F.jsonl.gz:852212", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502895" }
5df8390b8dcd4fcdfcc9e581fb617b98bdd775fa
Stackoverflow Stackexchange Q: Can I configure python to have matlab like print? Can I configure python to have matlab like print, so that when I just have a function returnObject() that it simply prints that object without me having to type print around it? I assume this is not easy, but something like if an object does not get bound by some other var it should get printed, so that this would work. a = 5 #prints nothing b = getObject() #prints nothing a #prints 5 b #prints getObject() getObject() #prints the object A: If you use an ipython notebook individual cells work like this. But you can only view one object per cell by typing the objects name. To see multiple objects you'd need to call print, or use lots of cells.
Q: Can I configure python to have matlab like print? Can I configure python to have matlab like print, so that when I just have a function returnObject() that it simply prints that object without me having to type print around it? I assume this is not easy, but something like if an object does not get bound by some other var it should get printed, so that this would work. a = 5 #prints nothing b = getObject() #prints nothing a #prints 5 b #prints getObject() getObject() #prints the object A: If you use an ipython notebook individual cells work like this. But you can only view one object per cell by typing the objects name. To see multiple objects you'd need to call print, or use lots of cells. A: You could write a script to modify the original script based on a set of rules that define what to print, then run the modified script. A basic script to do this would be: f = open('main.py', 'r') p = open('modified.py', 'w') p.write('def main(): \n') for line in f: temp = line if len(temp) == 1: temp = 'print(' + line + ')' p.write('\t' + temp) p.close() from modified import main main() The script main.py would then look like this: x = 236 x output: 236 A: Idea is as follows: parse AST of Python code, replace every expression with call to print and content of expression as argument and then run the modified version. I'm not sure whether it works with every code, but you might try. Save it as matlab.py and run your code as python3 -m matlab file.py. #!/usr/bin/env python3 import ast import os import sys class PrintAdder(ast.NodeTransformer): def add_print(self, node): print_func = ast.Name("print", ast.Load()) print_call = ast.Call(print_func, [node.value], []) print_statement = ast.Expr(print_call) return print_statement def visit_Expr(self, node): if isinstance(node.value, ast.Call) and node.value.func.id == 'print': return node return self.add_print(node) def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-') args = parser.parse_args() with args.infile as infile: code = infile.read() file_name = args.infile.name tree = ast.parse(code, file_name, 'exec') tree = PrintAdder().visit(tree) tree = ast.fix_missing_locations(tree) bytecode = compile(tree, file_name, 'exec') exec(bytecode) if __name__ == '__main__': main()
stackoverflow
{ "language": "en", "length": 360, "provenance": "stackexchange_0000F.jsonl.gz:852216", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502905" }
159821c22fe73a0a840edf8edfc6359237a59daa
Stackoverflow Stackexchange Q: Batch File Count all occurrences of a character within a string I have a version number 17.06.01.01 and I would like to know how many entries there are split by a period. My last piece of code was; setlocal ENABLEDELAYEDEXPANSION for /F "tokens=1-10 delims=." %%a in ("17.09.01.04.03") do ( set /a "iCount+=1" echo %%a, !iCount! ) endlocal I've tried a host of 'For' commands but seem to be getting further away each time. A: replace every dot with a space (as a delimiter). Then count number of tokens: @echo off set "string=17.09.01.04.03" set count=0 for %%a in (%string:.= %) do set /a count+=1 echo %count% (May give false results, if there are other spaces, commas or tabs in the string, but should work nice for your example of version strings containing only numbers and dots) @treintje: echo off set "string=1>7.0 9.0&1.04.0!3" set count=0 :again set "oldstring=%string%" set "string=%string:*.=%" set /a count+=1 if not "%string%" == "%oldstring%" goto :again echo %count%
Q: Batch File Count all occurrences of a character within a string I have a version number 17.06.01.01 and I would like to know how many entries there are split by a period. My last piece of code was; setlocal ENABLEDELAYEDEXPANSION for /F "tokens=1-10 delims=." %%a in ("17.09.01.04.03") do ( set /a "iCount+=1" echo %%a, !iCount! ) endlocal I've tried a host of 'For' commands but seem to be getting further away each time. A: replace every dot with a space (as a delimiter). Then count number of tokens: @echo off set "string=17.09.01.04.03" set count=0 for %%a in (%string:.= %) do set /a count+=1 echo %count% (May give false results, if there are other spaces, commas or tabs in the string, but should work nice for your example of version strings containing only numbers and dots) @treintje: echo off set "string=1>7.0 9.0&1.04.0!3" set count=0 :again set "oldstring=%string%" set "string=%string:*.=%" set /a count+=1 if not "%string%" == "%oldstring%" goto :again echo %count% A: For the sample string you provided, Stephan's approach is perfectly sufficient. However, if the string contains token separators (white-spaces, ,, ;, =), quotation marks ("), wild-cards (*, ?) or other special characters (^, &, (, ), >, <, |) it might probably fail. You could of course replace most of such characters by others in advance, and use delayed expansion rather than immediate one, but this still does not resolve all issues (for instance, you cannot replace all = or * characters due to the sub-string replacement syntax). The following approach however can deal with every arbitrary combination of all such characters. Basically it replaces every period (.) with a new-line (line-feed) character and lets find /C /V "" count the total number of lines. @echo off set "string=17.09.01.04.03" setlocal EnableDelayedExpansion if defined string (set ^"strtmp=!string:.=^ %= empty string =% !^") else set "strtmp=" for /F %%C in ('cmd /V /C echo(^^!strtmp^^!^| find /C /V ""') do set /A "count=%%C-1" echo "!string!" contains %count% periods. endlocal The periods become replaced by line-feeds in advance, using delayed expansion in order not to fail if any special characters occur. The for /F loop executes the command line cmd /V /C echo(^^!string^^!| find /C /V "" and captures its output in a variable, reduced by one as find /C /V "" actually returns the number of lines, which are separated by one less line-feeds (hence periods), originally. The double-escaped exclamation marks ^^! are needed in order to ensure that the variable strtmp is actually expanded within the explicitly invoked inner-most cmd instance, because otherwise, the contained multi-line string is not correctly transported into the pipe (|). A: A different approach comparing strLen before and after replacing the dots with nothing. @Echo off&SetLocal EnableExtensions EnableDelayedExpansion set "string=17.09.01.04.03" set "str2=%string:.=%" call :strlen string ret call :strlen str2 ret2 Set /A "Dots=ret-ret2" echo Number of dots in %string% is %Dots% goto :Eof :strLen string len :$source http://www.dostips.com/?t=Function.strLen (SETLOCAL ENABLEDELAYEDEXPANSION set "str=A!%~1!" set "len=0" for /L %%A in (12,-1,0) do (set /a "len|=1<<%%A" for %%B in (!len!) do if "!str:~%%B,1!"=="" set /a "len&=~1<<%%A") ) ENDLOCAL&IF "%~2" NEQ "" SET /a %~2=%len% EXIT /b Test with strings from comments: Pass: set "string=Tim says:"There is a cat and a dog"" Fail: set "string=1>7.0% 9.0&1.04.%0!3" Pass: set "string=Tim says:"There is a cat. And a dog""
stackoverflow
{ "language": "en", "length": 547, "provenance": "stackexchange_0000F.jsonl.gz:852217", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502909" }
88eb045283600d4a45e2c7321a5b7d0cee9c1fc1
Stackoverflow Stackexchange Q: Unable to click on checkboxes using Robot Framework and selenium2library I used 3 different waits purposely to locate the element (a checkbox) on the page as shown below, and they all pass. After that I click on that same element, which also passes. Now my question is: if the Click Element keyword passes, then why does Checkbox Should Be Selected fail, given that Click Element is clicking on that very checkbox? HTML screenshot. I have tried clicking on the checkbox multiple times using various strategies, but it fails every time. Please help and suggest a solution! Code I wrote: ` sleep 2 wait until page contains element id_service_levels_0 wait until element is enabled id=id_service_levels_0 wait until element is enabled id=id_service_levels_0 page should contain element id=id_service_levels_0 click element id=id_service_levels_0 checkbox should be selected id=id_service_levels_0 ` A: You can try the JavaScript executor as given below. JavascriptExecutor js = (JavascriptExecutor) driver; js.executeScript("document.getElementById('id_service_levels_0').click()");
Q: Unable to click on checkboxes using Robot Framework and selenium2library I used 3 different waits purposely to locate the element (a checkbox) on the page as shown below, and they all pass. After that I click on that same element, which also passes. Now my question is: if the Click Element keyword passes, then why does Checkbox Should Be Selected fail, given that Click Element is clicking on that very checkbox? HTML screenshot. I have tried clicking on the checkbox multiple times using various strategies, but it fails every time. Please help and suggest a solution! Code I wrote: ` sleep 2 wait until page contains element id_service_levels_0 wait until element is enabled id=id_service_levels_0 wait until element is enabled id=id_service_levels_0 page should contain element id=id_service_levels_0 click element id=id_service_levels_0 checkbox should be selected id=id_service_levels_0 ` A: You can try the JavaScript executor as given below. JavascriptExecutor js = (JavascriptExecutor) driver; js.executeScript("document.getElementById('id_service_levels_0').click()");
stackoverflow
{ "language": "en", "length": 155, "provenance": "stackexchange_0000F.jsonl.gz:852226", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502931" }
b6da521ef917d68e59187992ed1e53681d37aa6f
Stackoverflow Stackexchange Q: Angular 2 ng-build every time when code changed? I am developing a MEAN app. In the past I only used Angular, and ng serve was a blessing for trying code out and developing. Now, in combination with Node.js as a server that loads Angular from my so-called "public" folder, I have to run "ng build" every time, and it goes through all files even if I just changed a little code in one component's TypeScript. My question: what can I do to develop faster using Node.js and Angular 2 without running "ng build" every time the code changes? A: I think ng build --watch can help.
Q: Angular 2 ng-build every time when code changed? I am developing a MEAN app. In the past I only used Angular, and ng serve was a blessing for trying code out and developing. Now, in combination with Node.js as a server that loads Angular from my so-called "public" folder, I have to run "ng build" every time, and it goes through all files even if I just changed a little code in one component's TypeScript. My question: what can I do to develop faster using Node.js and Angular 2 without running "ng build" every time the code changes? A: I think ng build --watch can help. A: One way is to use the concurrently library and lite-server. Install them as dev dependencies in package.json. Using concurrently, run the TypeScript compiler and lite-server; the page will then be reloaded automatically whenever the code is modified. { "scripts" : { "start" : "tsc && concurrently \"npm run tsc:w\" \"npm run lite\" ", "lite" : "lite-server", "tsc":"tsc", "tsc:w":"tsc -w" }, "devDependencies":{ "concurrently":"^2.0.0", "lite-server":"2.2.0" } } Include the above code in your package.json file and run npm install, which will install the dependencies. Then run npm start, which will start the server, recompile the code whenever you change it, and reload the page.
stackoverflow
{ "language": "en", "length": 209, "provenance": "stackexchange_0000F.jsonl.gz:852228", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502938" }
cbd582e3aa7e8444e90855e1637fcaffbc8fe792
Stackoverflow Stackexchange Q: How to play a docker run --rm with docker-java? In our J2EE project, we want to call sjourdan/ffmpeg docker through the docker-java library. A typical call for ffmpeg conversion will be something like: docker run --rm -v /e/data:/tmp/workdir sjourdan/ffmpeg -i /tmp/workdir/test.mov -f mp4 -vcodec libx264 -acodec aac /tmp/workdir/test.mp4 We managed all of that with a DockerClient.createContainerCmd() and the right .with() methods, except for the --rm argument. Is there a way to add it through docker-java? A: According to this other StackOverflow question/answer, --rm is not handled by the Docker API, so we got no luck with docker-java either. So, in the end we carefully remove the container after the execution, something like: dockerClient.startContainerCmd(container.getId()).exec(); // do some stuff dockerClient.removeContainerCmd(container.getId()).withForce(true).exec();
Q: How to play a docker run --rm with docker-java? In our J2EE project, we want to call sjourdan/ffmpeg docker through the docker-java library. A typical call for ffmpeg conversion will be something like: docker run --rm -v /e/data:/tmp/workdir sjourdan/ffmpeg -i /tmp/workdir/test.mov -f mp4 -vcodec libx264 -acodec aac /tmp/workdir/test.mp4 We managed all of that with a DockerClient.createContainerCmd() and the right .with() methods, except for the --rm argument. Is there a way to add it through docker-java? A: According to this other StackOverflow question/answer, --rm is not handled by the Docker API, so we got no luck with docker-java either. So, in the end we carefully remove the container after the execution, something like: dockerClient.startContainerCmd(container.getId()).exec(); // do some stuff dockerClient.removeContainerCmd(container.getId()).withForce(true).exec(); A: You can send "AutoRemove" value to docker API: String containerId = dockerClient.createContainerCmd(image). withHostConfig(new HostConfig() { @JsonProperty("AutoRemove") public boolean autoRemove = true; }).exec().getId(); dockerClient.startContainerCmd(containerId).exec(); That's it Docker API info https://docs.docker.com/engine/api/v1.37/#operation/ContainerCreate "AutoRemove" was accepted, at least, starting at v1.25 A: For the current version, which is 3.2.8, you can set the autoremove option with: dockerClient.createContainerCmd(image).withHostConfig(new HostConfig().withAutoRemove(true)).exec()
stackoverflow
{ "language": "en", "length": 174, "provenance": "stackexchange_0000F.jsonl.gz:852231", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502945" }
db051dd84ca62cf305706fef5dbf40720c25f2d5
Stackoverflow Stackexchange Q: RavenDB EsentOutOfLongValueIDsException The following error is intermittently thrown when attempting to add or update a document: "Microsoft.Isam.Esent.Interop.EsentOutOfLongValueIDsException: Long-value ID counter has reached maximum value. (perform offline defrag to reclaim free/unused LongValueIDs)" I've attempted to perform this offline defrag according to https://ravendb.net/docs/article-page/3.5/csharp/users-issues/recovering-from-esent-errors. I stopped the RavenDB service, navigated to the Databases folder in an Administrator command prompt and ran "esentutl /d DatabaseName". I then get the following error: "Access to source database 'DatabaseName' failed with Jet error -1032. Operation terminated with error -1032 after 20.31 seconds." I have also tried to restart the server with RavenDB not set to start on start-up. I still get error -1032 when attempting to defrag. Is performing the defrag operation the correct action? If so, what process(es) would I need to stop in order for those files to not be in use? Thanks! A: The solution was to run Compact on Raven: Raven Studio > Manage Your Server > Compact. Compacting takes the database down, so I performed it on the replicated servers one at a time.
Q: RavenDB EsentOutOfLongValueIDsException The following error is intermittently thrown when attempting to add or update a document: "Microsoft.Isam.Esent.Interop.EsentOutOfLongValueIDsException: Long-value ID counter has reached maximum value. (perform offline defrag to reclaim free/unused LongValueIDs)" I've attempted to perform this offline defrag according to https://ravendb.net/docs/article-page/3.5/csharp/users-issues/recovering-from-esent-errors. I stopped the RavenDB service, navigated to the Databases folder in an Administrator command prompt and ran "esentutl /d DatabaseName". I then get the following error: "Access to source database 'DatabaseName' failed with Jet error -1032. Operation terminated with error -1032 after 20.31 seconds." I have also tried to restart the server with RavenDB not set to start on start-up. I still get error -1032 when attempting to defrag. Is performing the defrag operation the correct action? If so, what process(es) would I need to stop in order for those files to not be in use? Thanks! A: The solution was to run Compact on Raven: Raven Studio > Manage Your Server > Compact. Compacting takes the database down, so I performed it on the replicated servers one at a time.
stackoverflow
{ "language": "en", "length": 172, "provenance": "stackexchange_0000F.jsonl.gz:852233", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44502961" }
01dcc0b0a26d7bb3972792cf1146c5234570fab4
Stackoverflow Stackexchange Q: Combining convolutional layers and LSTM layers with variable-length sequences I am trying to combine Conv2D layers with LSTM layers on images. The problem is that the Conv2D layers takes as input a 4D tensor including the number of channels, and my LSTM network needs a 3D tensor. The problem is that I use bucketing, so my inputs don't have a predefined number of timesteps. I wanted to do something like that : input_data = Input(shape=[None, nb_features, 1]) cnn1 = Conv2D(nb_filters, kernel_size)(input_data) cnn2 = Conv2D(nb_filters, kernel_size)(cnn1) reshape = Reshape(target_shape=[None, nb_features])(cnn2) gru1 = Bidirectional(GRU(rnn_size, return_sequences=True))(reshape) gru2 = Bidirectional(GRU(rnn_size, return_sequences=True))(gru1) out = TimeDistributed(Dense(nblabels))(gru2) output = Activation('softmax')(out) But the Reshape layer needs a fully defined shape. Is there a solution to this problem? A: My suggestion is that you use: input_data = Input((None, nb_features)) cnn1 = Conv1D(nb_filters, kernel_size)(input_data) cnn2 = Conv1D(nb_filters, kernel_size)(cnn1) gru1 = Bidirectional(GRU(rnn_size, return_sequences=True))(cnn2) gru2 = Bidirectional(GRU(rnn_size, return_sequences=True))(gru1) out = TimeDistributed(Dense(nblabels))(gru2) output = Activation('softmax')(out) Then you can either use a Masking Layer at the beginning (and pad your input data with a dummy value), or you can use many numpy batches, each batch with a different length, without a masking or padding.
Q: Combining convolutional layers and LSTM layers with variable-length sequences I am trying to combine Conv2D layers with LSTM layers on images. The problem is that the Conv2D layers takes as input a 4D tensor including the number of channels, and my LSTM network needs a 3D tensor. The problem is that I use bucketing, so my inputs don't have a predefined number of timesteps. I wanted to do something like that : input_data = Input(shape=[None, nb_features, 1]) cnn1 = Conv2D(nb_filters, kernel_size)(input_data) cnn2 = Conv2D(nb_filters, kernel_size)(cnn1) reshape = Reshape(target_shape=[None, nb_features])(cnn2) gru1 = Bidirectional(GRU(rnn_size, return_sequences=True))(reshape) gru2 = Bidirectional(GRU(rnn_size, return_sequences=True))(gru1) out = TimeDistributed(Dense(nblabels))(gru2) output = Activation('softmax')(out) But the Reshape layer needs a fully defined shape. Is there a solution to this problem? A: My suggestion is that you use: input_data = Input((None, nb_features)) cnn1 = Conv1D(nb_filters, kernel_size)(input_data) cnn2 = Conv1D(nb_filters, kernel_size)(cnn1) gru1 = Bidirectional(GRU(rnn_size, return_sequences=True))(cnn2) gru2 = Bidirectional(GRU(rnn_size, return_sequences=True))(gru1) out = TimeDistributed(Dense(nblabels))(gru2) output = Activation('softmax')(out) Then you can either use a Masking Layer at the beginning (and pad your input data with a dummy value), or you can use many numpy batches, each batch with a different length, without a masking or padding.
stackoverflow
{ "language": "en", "length": 191, "provenance": "stackexchange_0000F.jsonl.gz:852324", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503222" }
c08994b5bbcef3b544138f413086331081edf9be
Stackoverflow Stackexchange Q: Python source deactivate has no effect I am attempting to deactivate my Python virtual environment. However, running source deactivate appears to have no effect: (general) machine:~ user$ source deactivate pyenv-virtualenv: deactivate 3.6.0/envs/general (general) machine:~ user$ Even after running the source deactivate command, I still appear to be in the general virtual environment. I also tried running just deactivate, but received the following message: pyenv-virtualenv: deactivate must be sourced. Run 'source deactivate' instead of 'deactivate' If it's relevant, I'm running macOS Sierra 10.12.4.
Q: Python source deactivate has no effect I am attempting to deactivate my Python virtual environment. However, running source deactivate appears to have no effect: (general) machine:~ user$ source deactivate pyenv-virtualenv: deactivate 3.6.0/envs/general (general) machine:~ user$ Even after running the source deactivate command, I still appear to be in the general virtual environment. I also tried running just deactivate, but received the following message: pyenv-virtualenv: deactivate must be sourced. Run 'source deactivate' instead of 'deactivate' If it's relevant, I'm running macOS Sierra 10.12.4.
stackoverflow
{ "language": "en", "length": 83, "provenance": "stackexchange_0000F.jsonl.gz:852388", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503405" }
efd5344d1fc3fcea2c6313b910ec8452392c7721
Stackoverflow Stackexchange Q: Rails, how to specify which fields to output in a controller's JSON response? in my controller I currently have: invite = Invite.find_by_token(params[:id]) user = invite.user json_response({ user: user }) def json_response(object, status = :ok) render json: object, status: status end Right now, user is returning all user fields. I want to return just (id, email)... I've tried: user = invite.user.select(:id, :email) user = invite.user.pluck(:id, :email) neither works. Ideas? A: You can use the method as_json passing attributes you want in the response, like: user.as_json(only: [:id, :email])
Q: Rails, how to specify which fields to output in a controller's JSON response? in my controller I currently have: invite = Invite.find_by_token(params[:id]) user = invite.user json_response({ user: user }) def json_response(object, status = :ok) render json: object, status: status end Right now, user is returning all user fields. I want to return just (id, email)... I've tried: user = invite.user.select(:id, :email) user = invite.user.pluck(:id, :email) neither works. Ideas? A: You can use the method as_json passing attributes you want in the response, like: user.as_json(only: [:id, :email]) A: I know this question already has an answer, but there is also a nice gem you could use called active_model_serializers. This lets you specify exactly which properties you want in your JSON output for different models and even lets you include relationships to other models in your response. Gemfile: gem 'active_model_serializers', '~> 0.10.0' Then run bundle install. You can then create a serializer using the generator command: rails g serializer user which will create the serializer in project-root/app/serializers/. In your serializer, you can whitelist the attributes you would like: project-root/app/serializers/user_serializer.rb: class UserSerializer < ActiveModel::Serializer attributes :id, :email end Now any time you return a User object it will only output those two attributes, id and email. Want to print out related models? Easy. You can just add the relationship in your serializer and it will include those related models in your JSON output. Pretend a user "has many" posts: class UserSerializer < ActiveModel::Serializer attributes :id, :email has_many :posts end Now your JSON outputs should look something like: { "id": 1, "email": "[email protected]", "posts": [{ id: 1, title: "My First Post", body: "This is the post body.", created_at: "2017-05-18T20:03:14.955Z", updated_at: "2017-05-18T20:03:14.955Z" }, { id: 2, title: "My Second Post", body: "This is the post body again.", created_at: "2017-05-19T20:03:14.955Z", updated_at: "2017-05-19T20:03:14.955Z" }, ... ] } Pretty neat and convenient. And if you want to limit the posts to only print certain columns as well, all you need to do is create a serializer for posts, specify the attributes, and the output will just work.
stackoverflow
{ "language": "en", "length": 341, "provenance": "stackexchange_0000F.jsonl.gz:852389", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503407" }
64707c16bf51bdfd3819098eafac65600614691e
Stackoverflow Stackexchange Q: Destructuring nested objects as function parameters In ES6 we can do: let myFunc = ({name}) => { console.log(name) } myFunc({name:'fred'}) // => logs 'fred' But how do I do it for nested properties like this: myFunc({event:{target:{name:'fred'}}}) // => I want it to log 'fred' What should myFunc look like so that it logs 'fred'? I cannot change the object passed in. I wish to use destructuring to achieve this or some other suitable ES6 approach. A: You can simply do it like this: const myFunc = ({event: {target: {name}}}) => { console.log(name) } myFunc({event: {target: {name: 'fred'}}}) .as-console-wrapper { max-height: 100% !important; top: 0; } Here is another implementation, with both as parameters, but the second is entirely optional: const myFunc = ( {name: name}, {event: {target: {name: eventTargetName = ''} = ''} = ''} = '' ) => { console.log(name, eventTargetName) } myFunc({name:'fred'}) myFunc({name:'papi'}, {event: {target: {name: 'fredo'}}}) .as-console-wrapper { max-height: 100% !important; top: 0; }
Q: Destructuring nested objects as function parameters In ES6 we can do: let myFunc = ({name}) => { console.log(name) } myFunc({name:'fred'}) // => logs 'fred' But how do I do it for nested properties like this: myFunc({event:{target:{name:'fred'}}}) // => I want it to log 'fred' What should myFunc look like so that it logs 'fred'? I cannot change the object passed in. I wish to use destructuring to achieve this or some other suitable ES6 approach. A: You can simply do it like this: const myFunc = ({event: {target: {name}}}) => { console.log(name) } myFunc({event: {target: {name: 'fred'}}}) .as-console-wrapper { max-height: 100% !important; top: 0; } Here is another implementation, with both as parameters, but the second is entirely optional: const myFunc = ( {name: name}, {event: {target: {name: eventTargetName = ''} = ''} = ''} = '' ) => { console.log(name, eventTargetName) } myFunc({name:'fred'}) myFunc({name:'papi'}, {event: {target: {name: 'fredo'}}}) .as-console-wrapper { max-height: 100% !important; top: 0; } A: Try this: let myFunc = ({ event: { target: { name } } }) => { console.log(name); }; myFunc({ event: { target: { name:'fred' } } }); // => logs 'fred' See also examples on MDN. A: You can do: let myFunc = ( obj ) => { console.log(obj.event.target.name); }; myFunc({ event: { target: { name: 'fred' } } }); Or: let myFunc = ( {event: {target: { name } } } ) => { console.log(name); }; myFunc({ event: { target: { name: 'fred' } } });
stackoverflow
{ "language": "en", "length": 246, "provenance": "stackexchange_0000F.jsonl.gz:852391", "question_score": "8", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503409" }
954c34571a7058ad632b136d8366893ebeadf4b0
Stackoverflow Stackexchange Q: Python Shell expected an indented block I tried this code at the interpreter prompt: >>> x = 7 >>> if x > 5: ... print("five") I got this error message: File "<stdin>", line 2 print("five") IndentationError: expected an indented block Why? A: In the shell you should do this: >>> if x > 5: ... print("five") That is, add an indent block after the ":"
Q: Python Shell expected an indented block I tried this code at the interpreter prompt: >>> x = 7 >>> if x > 5: ... print("five") I got this error message: File "<stdin>", line 2 print("five") IndentationError: expected an indented block Why? A: In the shell you should do this: >>> if x > 5: ... print("five") That is, add an indent block after the ":" A: Indentation is important in Python. You need to indent your print statement with a few spaces or a tab otherwise the interpreter will think your if is empty. >>> if x > 5: ... print("five")
stackoverflow
{ "language": "en", "length": 102, "provenance": "stackexchange_0000F.jsonl.gz:852407", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503446" }
dcd50eefe1977f7c73f01287f01088bbeef42055
Stackoverflow Stackexchange Q: How to use RazorEngine to include files? I am trying to use RazorEngine to include other files in a template, but I am a little stuck. I got the basics working, but I want to be able to use @Include("somefile.html") in my template. This is what I have so far: string tpl = @"@Include(""foo.html"");"; ResolvePathTemplateManager r = new ResolvePathTemplateManager(new string[] { "html" }); var config = new TemplateServiceConfiguration(); config.TemplateManager = r; var service = RazorEngineService.Create(config); var a = service.RunCompile(tpl, "name", null, new { test = "TEMPLATE" }); The current working directory has an html dir where foo.html is located, yet I get this error: Could not resolve template name A: Apparently, when resolving paths, you cannot use a string template. This code works: ResolvePathTemplateManager r = new ResolvePathTemplateManager(new string[] { "html" }); var config = new TemplateServiceConfiguration(); config.TemplateManager = r; var service = RazorEngineService.Create(config); var a = service.RunCompile("foo.html"); and in foo.html I can use: @Include("otherfile.html"); to include a file from the same dir.
Q: How to use RazorEngine to include files? I am trying to use RazorEngine to include other files in a template, but I am a little stuck. I got the basics working, but I want to be able to use @Include("somefile.html") in my template. This is what I have so far: string tpl = @"@Include(""foo.html"");"; ResolvePathTemplateManager r = new ResolvePathTemplateManager(new string[] { "html" }); var config = new TemplateServiceConfiguration(); config.TemplateManager = r; var service = RazorEngineService.Create(config); var a = service.RunCompile(tpl, "name", null, new { test = "TEMPLATE" }); The current working directory has an html dir where foo.html is located, yet I get this error: Could not resolve template name A: Apparently, when resolving paths, you cannot use a string template. This code works: ResolvePathTemplateManager r = new ResolvePathTemplateManager(new string[] { "html" }); var config = new TemplateServiceConfiguration(); config.TemplateManager = r; var service = RazorEngineService.Create(config); var a = service.RunCompile("foo.html"); and in foo.html I can use: @Include("otherfile.html"); to include a file from the same dir.
stackoverflow
{ "language": "en", "length": 163, "provenance": "stackexchange_0000F.jsonl.gz:852429", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503522" }
87203ce9484ffe26310912fc6d1edae9cbcad4ea
Stackoverflow Stackexchange Q: Android Studio Gradle Project Sync - Failed To Resolve Espresso-Core and Appcompat I am following the 'First App' tutorial found at https://developer.android.com/training/basics/firstapp/index.html. However, on creating a default 'Empty activity' project and trying to build it, I receive the following errors: * *Error:(23, 24) Failed to resolve: com.android.support.test:espresso-core:2.2.2 *Error:(26, 13) Failed to resolve: com.android.support:appcompat-v7:26.+ My (default / autogenerated) build.gradle: apply plugin: 'com.android.application' android { compileSdkVersion 26 buildToolsVersion "26.0.0" defaultConfig { applicationId "com.example.myfirstapp" minSdkVersion 15 targetSdkVersion 26 versionCode 1 versionName "1.0" testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } buildTypes { release { minifyEnabled false proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' } } } dependencies { compile fileTree(dir: 'libs', include: ['*.jar']) androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', { exclude group: 'com.android.support', module: 'support-annotations' }) compile 'com.android.support:appcompat-v7:26.+' compile 'com.android.support.constraint:constraint-layout:1.0.2' testCompile 'junit:junit:4.12' } Although I have looked at several related Stack Overflow questions, I am yet to find a solution that works. I am using a fresh, default install of Android Studio 2.3.3. A: You should check if your repository includes new google maven repo. Something like: allprojects { repositories { jcenter() maven { url "https://maven.google.com" } } } See docs for details.
Q: Android Studio Gradle Project Sync - Failed To Resolve Espresso-Core and Appcompat I am following the 'First App' tutorial found at https://developer.android.com/training/basics/firstapp/index.html. However, on creating a default 'Empty activity' project and trying to build it, I receive the following errors: * *Error:(23, 24) Failed to resolve: com.android.support.test:espresso-core:2.2.2 *Error:(26, 13) Failed to resolve: com.android.support:appcompat-v7:26.+ My (default / autogenerated) build.gradle: apply plugin: 'com.android.application' android { compileSdkVersion 26 buildToolsVersion "26.0.0" defaultConfig { applicationId "com.example.myfirstapp" minSdkVersion 15 targetSdkVersion 26 versionCode 1 versionName "1.0" testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" } buildTypes { release { minifyEnabled false proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' } } } dependencies { compile fileTree(dir: 'libs', include: ['*.jar']) androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', { exclude group: 'com.android.support', module: 'support-annotations' }) compile 'com.android.support:appcompat-v7:26.+' compile 'com.android.support.constraint:constraint-layout:1.0.2' testCompile 'junit:junit:4.12' } Although I have looked at several related Stack Overflow questions, I am yet to find a solution that works. I am using a fresh, default install of Android Studio 2.3.3. A: You should check if your repository includes new google maven repo. Something like: allprojects { repositories { jcenter() maven { url "https://maven.google.com" } } } See docs for details. A: Used compile 'com.android.support:appcompat-v7:26.0.0-alpha1' A: Modify the dependency to this: compile 'com.android.support:appcompat-v7:25.3.1' Using + might cause unstable builds. Go to the SDK Manager and download/update all required tools and platforms. That should solve the error. A: There are some changes in the new Gradle 4.1: instead of compile we should use implementation in the build.gradle file: implementation 'com.android.support:appcompat-v7:26.0.0' The Gradle classpath is: classpath 'com.android.tools.build:gradle:3.0.0-alpha8' A: Thanks for your time, I have since fixed the problem! Although I am unsure as to what caused it, completely deleting and reinstalling Android Studio and the Java SDK has made the errors go away. I believe it may have been conflicting with some android software I installed a long time ago! Thanks again for your suggestions, Oli
stackoverflow
{ "language": "en", "length": 296, "provenance": "stackexchange_0000F.jsonl.gz:852430", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503523" }
53c054afab813e02026975d638693f18b4d602c3
Stackoverflow Stackexchange Q: Stripe Connect account id, customer id security issues I am just wondering if it is okay to make Stripe account id (for Connect) and customer id visible on browser. Is there any way that a person other than its holder misuses by obtaining the ids? (for example, moving earned money to their account or change the person's information as a method of attacking) A: I think it's perfectly fine to expose stripe accountIds to the client. Like others have said, these ids are useless to an attacker unless the attacker gets your secret key. Other answers argue that exposed ids + compromised secret make it easier for an attacker. That's true, but if the attacker has your secret, you're screwed anyway. It takes 1 GET request, to list all accountIds.
Q: Stripe Connect account id, customer id security issues I am just wondering if it is okay to make Stripe account id (for Connect) and customer id visible on browser. Is there any way that a person other than its holder misuses by obtaining the ids? (for example, moving earned money to their account or change the person's information as a method of attacking) A: I think it's perfectly fine to expose stripe accountIds to the client. Like others have said, these ids are useless to an attacker unless the attacker gets your secret key. Other answers argue that exposed ids + compromised secret make it easier for an attacker. That's true, but if the attacker has your secret, you're screwed anyway. It takes 1 GET request, to list all accountIds. A: All sensitive information coming from the account id requires your Stripe Secret Key (or that of the account, for Connect accounts). That being said, if access to those keys is compromised, having the account ids readily available just makes it that much easier for an attacker. With public keys, I'm not sure you can even add a card directly to a user. Just create the card tokens and what not. A: I consider stripe connected account id similar to an email address. The email address needs to be publicly known in order for people to email you. If they want to read your emails they need your login (eg. password).
stackoverflow
{ "language": "en", "length": 242, "provenance": "stackexchange_0000F.jsonl.gz:852439", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503552" }
3c85e144dfd15589455c45fecbb498cbe4fa5ce6
Stackoverflow Stackexchange Q: Selenium + Python: How to stop page loading when certain element gets loaded? The implicit and explicit waits can be used when the page uses AJAX, but I want to stop the loading caused by driver.get() when sufficient elements are loaded. Is it possible to do so, given that the driver.get() call returns only when the page finishes loading? A: Yes, it's possible by setting the pageLoadStrategy capability to none. Then wait for an element to be present and call window.stop to stop the loading: from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By capa = DesiredCapabilities.CHROME capa["pageLoadStrategy"] = "none" driver = webdriver.Chrome(desired_capabilities=capa) wait = WebDriverWait(driver, 20) driver.get('http://stackoverflow.com/') wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#h-top-questions'))) driver.execute_script("window.stop();")
Q: Selenium + Python: How to stop page loading when certain element gets loaded? The implicit and explicit waits can be used when the page uses AJAX, but I want to stop the loading caused by driver.get() when sufficient elements are loaded. Is it possible to do so, given that the driver.get() call returns only when the page finishes loading? A: Yes, it's possible by setting the pageLoadStrategy capability to none. Then wait for an element to be present and call window.stop to stop the loading: from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By capa = DesiredCapabilities.CHROME capa["pageLoadStrategy"] = "none" driver = webdriver.Chrome(desired_capabilities=capa) wait = WebDriverWait(driver, 20) driver.get('http://stackoverflow.com/') wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#h-top-questions'))) driver.execute_script("window.stop();")
stackoverflow
{ "language": "en", "length": 125, "provenance": "stackexchange_0000F.jsonl.gz:852447", "question_score": "28", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503576" }
176922659da30c397cf94860cd7616461251a98c
Stackoverflow Stackexchange Q: javascript: How to URL encode strings that contain dashes (-)? I have a string that represents the name of an entity. The name of this entity has a dash in it (-). How can I encode the dash? I believe it should be encoded to '%2D' but when I try encodeURIComponent() or encodeURI() for that matter, it doesn't encode the dash. I was just wondering if there is an encode function that will encode the dash rather than just doing a .replace A: - is a character that appears in the ASCII character set and has no special meaning in URLs. While you can encode it as %2D, doing so is not needed nor is it normal. Encoding it would be like using %61 instead of a. There is no standard encoding function that will encode a - character. replace is the logical choice if you really, really want to.
Q: javascript: How to URL encode strings that contain dashes (-)? I have a string that represents the name of an entity. The name of this entity has a dash in it (-). How can I encode the dash? I believe it should be encoded to '%2D' but when I try encodeURIComponent() or encodeURI() for that matter, it doesn't encode the dash. I was just wondering if there is an encode function that will encode the dash rather than just doing a .replace A: - is a character that appears in the ASCII character set and has no special meaning in URLs. While you can encode it as %2D, doing so is not needed nor is it normal. Encoding it would be like using %61 instead of a. There is no standard encoding function that will encode a - character. replace is the logical choice if you really, really want to.
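If a consumer really does insist on %2D, a minimal sketch of the replace approach mentioned in the answer could look like this; the helper name is made up.

```javascript
// Hypothetical helper: standard encoding, then force "-" to "%2D" (rarely necessary).
function encodeWithDash(value) {
  return encodeURIComponent(value).replace(/-/g, '%2D');
}

console.log(encodeURIComponent('my-entity')); // "my-entity"   (dash is left alone)
console.log(encodeWithDash('my-entity'));     // "my%2Dentity"
```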
stackoverflow
{ "language": "en", "length": 151, "provenance": "stackexchange_0000F.jsonl.gz:852449", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503579" }
13bfb7e2aa48839ba439f707ca9ab39faa221f63
Stackoverflow Stackexchange Q: javascript to add aria attribute I'm trying to add aria-required="true" attributes to some form elements from ninja forms in wordpress. I'm using a header/footer script inject plugin. But I can't seem to get my code to actually work. Any help would be greatly appreciated! http://champion.magnet.today/contact/ <script> function codeAddress() { var x = document.getElementsByClassName("nf-element"); var i; for (i = 0; i < x.length; i++) { x[i].addAttribute("aria-required", "true"); } window.onload = codeAddress } </script> <div class="nf-field-element"> <input id="nf-field-17" name="nf-field-17" class="ninja-forms-field nf-element" type="text" value="" placeholder="First Name"> </div> A: You need to use setAttribute. Also window.load should be outside the codeAddress function function codeAddress() { var x = document.getElementsByClassName("nf-element"); var i; for (i = 0; i < x.length; i++) { console.log(x[i]) x[i].setAttribute("aria-required", "true"); } } window.onload = codeAddress <div class="nf-field-element"> <input id="nf-field-17" name="nf-field-17" class="ninja-forms-field nf-element" type="text" value="" placeholder="First Name"> </div>
Q: javascript to add aria attribute I'm trying to add aria-required="true" attributes to some form elements from ninja forms in wordpress. I'm using a header/footer script inject plugin. But I can't seem to get my code to actually work. Any help would be greatly appreciated! http://champion.magnet.today/contact/ <script> function codeAddress() { var x = document.getElementsByClassName("nf-element"); var i; for (i = 0; i < x.length; i++) { x[i].addAttribute("aria-required", "true"); } window.onload = codeAddress } </script> <div class="nf-field-element"> <input id="nf-field-17" name="nf-field-17" class="ninja-forms-field nf-element" type="text" value="" placeholder="First Name"> </div> A: You need to use setAttribute. Also window.load should be outside the codeAddress function function codeAddress() { var x = document.getElementsByClassName("nf-element"); var i; for (i = 0; i < x.length; i++) { console.log(x[i]) x[i].setAttribute("aria-required", "true"); } } window.onload = codeAddress <div class="nf-field-element"> <input id="nf-field-17" name="nf-field-17" class="ninja-forms-field nf-element" type="text" value="" placeholder="First Name"> </div> A: try replacing x[i].addAttribute("aria-required", "true"); with x[i].setAttribute("aria-required", "true");
stackoverflow
{ "language": "en", "length": 145, "provenance": "stackexchange_0000F.jsonl.gz:852490", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503719" }
f6237c02008facfa9689697f8a7737caa2d650e7
Stackoverflow Stackexchange Q: Using Kafka with Netflix Conductor I want to know if there is an easy way to connect Kafka and Netflix Conductor (instead of SQS)? As of now, it seems to work only with Amazon SQS. Moreover, it seems that it is only possible to make one action per task. Is there a way to execute more than one action per task? Thanks in advance, A: To add Kafka support to Netflix Conductor, you will need to * *Create a module in contribs that extends AbstractModule (Add an entry in your server.properties against conductor.additional.modules property) *Implement ObservableQueue for kafka producer and consumer operations. *Implement EventQueueProvider just like the SQS implementation *Add properties for your kafka initialization in server.properties kafka.producer.bootstrap.servers=host1:port1;host2:port2 kafka.producer.key.serializer=org.apache.kafka.common.serialization.StringSerializer kafka.producer.value.serializer=org.apache.kafka.common.serialization.StringSerializer kafka.consumer.bootstrap.servers=host1:port1;host2:port2 kafka.consumer.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer kafka.consumer.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer * *Add kafka library support to the project by updating the build.gradle of contribs modules. Refer to the below PR link for implementation (Authored by preeth-gopalakrishnan) https://github.com/Netflix/conductor/pull/672 (If you don't find the PR, assume it is merged to the master)
Q: Using Kafka with Netflix Conductor I want to know if there is an easy way to connect Kafka and Netflix Conductor (instead of SQS)? As of now, it seems to work only with Amazon SQS. Moreover, it seems that it is only possible to make one action per task. Is there a way to execute more than one action per task? Thanks in advance, A: To add Kafka support to Netflix Conductor, you will need to * *Create a module in contribs that extends AbstractModule (Add an entry in your server.properties against conductor.additional.modules property) *Implement ObservableQueue for kafka producer and consumer operations. *Implement EventQueueProvider just like the SQS implementation *Add properties for your kafka initialization in server.properties kafka.producer.bootstrap.servers=host1:port1;host2:port2 kafka.producer.key.serializer=org.apache.kafka.common.serialization.StringSerializer kafka.producer.value.serializer=org.apache.kafka.common.serialization.StringSerializer kafka.consumer.bootstrap.servers=host1:port1;host2:port2 kafka.consumer.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer kafka.consumer.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer * *Add kafka library support to the project by updating the build.gradle of contribs modules. Refer to the below PR link for implementation (Authored by preeth-gopalakrishnan) https://github.com/Netflix/conductor/pull/672 (If you don't find the PR, assume it is merged to the master)
stackoverflow
{ "language": "en", "length": 162, "provenance": "stackexchange_0000F.jsonl.gz:852501", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44503759" }
fa895e11ef2cda1e6714bcb495204c58e88382df
Stackoverflow Stackexchange Q: Is "constexpr if" better than switch statement? C++17 introduces "constexpr if" that is instantiated depending on a compile-time condition. Does it mean that it's better to use "constexpr if" in template functions rather than switch statement? For example: template<int val> void func() { if constexpr(val == 0) {} else if constexpr(val == 1) {} else ... if constexpr(val == k) {} else {} } // vs template<int val> void func() { switch (val) { case 0: break; case 1: break; ... case k: break; default: break; } } A: if constexpr was introduced to eliminate certain branches that are ill-formed if the condition is false. In your case, you are only doing some operations on an int, so no branch should be ill-formed if another one is well-formed. It doesn't really make sense to use it. As said above, using if constexpr provides no real advantages besides guaranteeing that the compiler will remove every other branch. But I expect a good compiler with optimizations to do that too with the switch, as val is a constant when func is instantiated. I would use the switch statement, but that's just me. So, choose the one you like.
Q: Is "constexpr if" better than switch statement? C++17 introduces "constexpr if" that is instantiated depending on a compile-time condition. Does it mean that it's better to use "constexpr if" in template functions rather than switch statement? For example: template<int val> void func() { if constexpr(val == 0) {} else if constexpr(val == 1) {} else ... if constexpr(val == k) {} else {} } // vs template<int val> void func() { switch (val) { case 0: break; case 1: break; ... case k: break; default: break; } } A: if constexpr was introduced to eliminate certain branches that are ill-formed if the condition is false. In your case, you are only doing some operations on an int, so no branch should be ill-formed if another one is well-formed. It doesn't really make sense to use it. As said above, using if constexpr provides no real advantages besides guaranteeing that the compiler will remove every other branch. But I expect a good compiler with optimizations to do that too with the switch, as val is a constant when func is instantiated. I would use the switch statement, but that's just me. So, choose the one you like.
stackoverflow
{ "language": "en", "length": 197, "provenance": "stackexchange_0000F.jsonl.gz:852580", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504025" }
b48b9fd8d11e158a02ca1252e831f8f0cf751317
Stackoverflow Stackexchange Q: pass QueryString Parameters from API Gateway to AWS Lambda C# I am trying to call AWS Lambda using API Gateway and it returns HTML code. It works fine when I don't pass any parameters, but I want to pass some QueryString parameters and use them in Lambda. I have my Lambda in C# and I see parameters being passed from the API. Response from API: "headers": {}, "QueryStringParameters": { "Environment": "xzc" }, "PathParameters": {} } In Lambda, the APIGatewayProxyRequest is coming as null API Lambda public string FunctionHandler(APIGatewayProxyRequest request, ILambdaContext context) How do I read the query string parameters in AWS Lambda in C#? A: Looks like you just need to check Use Lambda Proxy integration in Integration Request in your API Gateway resource config. Also, you should include: using Amazon.Lambda.APIGatewayEvents; and your handler function header should look something like this: public APIGatewayProxyResponse FunctionHandler( APIGatewayProxyRequest input, ILambdaContext context) Then you can access your query string parameters with: input.QueryStringParameters
Q: pass QueryString Parameters from API Gateway to AWS Lambda C# I am trying to call AWS Lambda using API Gateway and it returns HTML code. It works fine when I don't pass any parameters, but I want to pass some QueryString parameters and use them in Lambda. I have my Lambda in C# and I see parameters being passed from the API. Response from API: "headers": {}, "QueryStringParameters": { "Environment": "xzc" }, "PathParameters": {} } In Lambda, the APIGatewayProxyRequest is coming as null API Lambda public string FunctionHandler(APIGatewayProxyRequest request, ILambdaContext context) How do I read the query string parameters in AWS Lambda in C#? A: Looks like you just need to check Use Lambda Proxy integration in Integration Request in your API Gateway resource config. Also, you should include: using Amazon.Lambda.APIGatewayEvents; and your handler function header should look something like this: public APIGatewayProxyResponse FunctionHandler( APIGatewayProxyRequest input, ILambdaContext context) Then you can access your query string parameters with: input.QueryStringParameters A: Explaining the case of more than one input parameter, as that is sometimes also a problem for developers: Step 01: This should be your C-Sharp method public string FunctionHandler(string strEnvironmentA, string strEnvironmentB, ILambdaContext context); Step 02: In API > GET Method Execution > Method Request add query string parameter for * *strEnvironmentA *strEnvironmentB Step 03: In API > GET Method Execution > Integration Request > Body Mapping Template add this application/json template "$input.params('strEnvironmentA')" "$input.params('strEnvironmentB')" A: Do something like this: public string FunctionHandler(string input, ILambdaContext context); And then you can pass the input in the request body instead of query string params. A: if (request.QueryStringParameters != null) { queryStringParameters = request.QueryStringParameters; foreach (var item in queryStringParameters) { Console.WriteLine($"QueryStringParameter - " + item.Key + ":" + item.Value); } }
stackoverflow
{ "language": "en", "length": 282, "provenance": "stackexchange_0000F.jsonl.gz:852610", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504122" }
c6778358fef44d700eca299a08cbcf4d0c4e4bcf
Stackoverflow Stackexchange Q: java access time to variables Suppose we have a class Const.java containing 1000 String constants: public static final String foo1 = "foo1"; public static final String foo2 = "foo2"; ... public static final String foo1000 = "foo1000"; Now, some method in another class executes String s = Const.foo1000; Does access time of variables depend on the number of such variables? (That is, if there were 1,000,000 Strings in Const.java, would code run at the same speed?) A: The access time will always be the same. Your class is loaded using the classloader into RAM when the application starts. Constants (static/final) are stored in a memory position that is replaced in your code at compile time wherever it's used. The only difference that you should notice is at the start time of your application, which will be proportional to the number of variables that you have in the class. Accessing a memory position is always O(1), like retrieving an object from a HashMap.
Q: java access time to variables Suppose we have a class Const.java containing 1000 String constants: public static final String foo1 = "foo1"; public static final String foo2 = "foo2"; ... public static final String foo1000 = "foo1000"; Now, some method in another class executes String s = Const.foo1000; Does access time of variables depend on the number of such variables? (That is, if there were 1,000,000 Strings in Const.java, would code run at the same speed?) A: The access time will always be the same. Your class is loaded using the classloader into RAM when the application starts. Constants (static/final) are stored in a memory position that is replaced in your code at compile time wherever it's used. The only difference that you should notice is at the start time of your application, which will be proportional to the number of variables that you have in the class. Accessing a memory position is always O(1), like retrieving an object from a HashMap. A: Yes, it would run at the same speed. An important reason is that the constants are all resolved at compile time, not at run time. Any static final field consisting only of literals, or values of other static final fields which consist only of literals, is analyzed when the code is compiled. In fact, if you were to decompile the assignment, you would see: String s = "foo1000"; // No reference whatsoever to Const
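A minimal illustration of the constant folding described above; the class layout is hypothetical and only two constants are shown, but the same reasoning applies to 1000 or 1,000,000 of them.

```java
// Hypothetical example: the reference to Const.FOO1000 is folded at compile time.
class Const {
    public static final String FOO1 = "foo1";
    public static final String FOO1000 = "foo1000";
}

public class Demo {
    public static void main(String[] args) {
        String s = Const.FOO1000;
        // "javap -c Demo" shows an ldc of the literal "foo1000" here,
        // not a getstatic on Const, so the lookup cost does not depend
        // on how many constants Const declares.
        System.out.println(s);
    }
}
```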
stackoverflow
{ "language": "en", "length": 240, "provenance": "stackexchange_0000F.jsonl.gz:852614", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504135" }
4802037383d15bacc7a380a0396a059ffc2c904c
Stackoverflow Stackexchange Q: Should I use panic or return error? Go provides two ways of handling errors, but I'm not sure which one to use. Assuming I'm implementing a classic ForEach function which accepts a slice or a map as an argument. To check whether an iterable is passed in, I could do: func ForEach(iterable interface{}, f interface{}) { if isNotIterable(iterable) { panic("Should pass in a slice or map!") } } or func ForEach(iterable interface{}, f interface{}) error { if isNotIterable(iterable) { return fmt.Errorf("Should pass in a slice or map!") } } I saw some discussions saying panic() should be avoided, but people also say that if program cannot recover from error, you should panic(). Which one should I use? And what's the main principle for picking the right one? A: You should assume that a panic will be immediately fatal, for the entire program, or at the very least for the current goroutine. Ask yourself "when this happens, should the application immediately crash?" If yes, use a panic; otherwise, use an error.
Q: Should I use panic or return error? Go provides two ways of handling errors, but I'm not sure which one to use. Assuming I'm implementing a classic ForEach function which accepts a slice or a map as an argument. To check whether an iterable is passed in, I could do: func ForEach(iterable interface{}, f interface{}) { if isNotIterable(iterable) { panic("Should pass in a slice or map!") } } or func ForEach(iterable interface{}, f interface{}) error { if isNotIterable(iterable) { return fmt.Errorf("Should pass in a slice or map!") } } I saw some discussions saying panic() should be avoided, but people also say that if program cannot recover from error, you should panic(). Which one should I use? And what's the main principle for picking the right one? A: You should assume that a panic will be immediately fatal, for the entire program, or at the very least for the current goroutine. Ask yourself "when this happens, should the application immediately crash?" If yes, use a panic; otherwise, use an error. A: I like the way it's done in some libraries where on top of a regular method DoSomething, its "panicky" version is added with MustDoSomething. I'm relatively new to go, but I've already seen it in several places, notably sqlx. In general, if you want to expose your code to someone else, you should either have Must- and a regular version of the method, or your methods/functions should give the client a chance to recover the way they want and so error should be available to them in a go-idiomatic way. Having said that, I agree that if your API/library is used inappropriately, it's Ok to panic as well. As a matter of fact, I've also seen methods like MustGetenv() that will panic if a critical env.var is missing. Fail-fast mechanism basically. A: If some mandatory requirement is not provided or not there while starting the service (eg. database connection, some service configuration which is required) then you should use panic. There should be return error for any user response or server side error. A: Ask yourself these questions: * *Do you expect the exceptional situation to occur, regardless how well would you code your app? Do you think it should be useful to make the user aware of such condition as part of the normal usage of your app? Handle it as an error, because it concerns the application as working normally. *Should that exceptional situation NOT occur if you code appropriately (and somewhat defensively)? (example: dividing by zero, or accessing an array element out of bounds) Is your app totally clueless under that error? Panic. *Do you have your API and want to ensure users use it appropriately? Panic. Your API will seldom recover if used incorrectly. A: Use panic. Because your use case is to catch a bad use of your API. This should never happen at runtime if the program is calling your API properly. In fact, any program calling your API with correct arguments will behave in the same way if the test is removed. The test is there only to fail early with an error message helpful to the programmer that did the mistake. Ideally, the panic might be reached once during development when running the testsuite and the programmer would fix the call even before committing the bad code, and that incorrect use would never reach production. See also this reponse to question Is function parameter validation using errors a good pattern in Go?. A: Use error whenever possible Only use panic when your code could end up in a bad state that would be prone to crashing; something truly unexpected. 
The example above with ForEach() is an exported func that accepts an interface so it should expect someone will improperly call it. And if it is improperly called, you know why you cannot continue and you know how to handle that error. isNotIterable is literally binary and easy to control. But error is not like a try/catch Even if you try to justify panic/recover by looking at throw/catch from other languages, you still use errors. We know you are trying the function because you are calling it, we know there was an error because err != nil, and just like checking the type of exception thrown you can check the type of error returned with errors.Is(err, ErrNotIterable) So should you use panic for errors in concurrency? The answer is still most likely no. Errors are still the preferred way in Go and you can use a wait group to shut down the goroutines: ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) // automatically cancel in 5 min defer cancel() errGroup, ctx := errgroup.WithContext(ctx) errGroup.Go(func() error { // do crazy stuff; you can still check for errors if ... { return fmt.Errorf("critical error, stopping all goroutines now") } // code completed without issues return nil }) err = errGroup.Wait() Even using the structure of the original example, you still have better control with errors than panics: func ForEach(iterable interface{}, f interface{}) error { if isNotIterable(iterable) { return fmt.Errorf("expected something iterable but got %v", reflect.ValueOf(iterable).String()) } switch v.Kind() { case reflect.Map: ... case reflect.Array, reflect.Slice: ... default: return fmt.Errorf("isNotIterable is false but I do not know how to iterate through %v", reflect.ValueOf(iterable).String()) } But error feels very verbose Yes, that is the point. When an error is returned, it is at that point to do something about it. You are giving the calling code options rather than making the decision to start shutting down and killing the application unless you recover(). If you are just returning the same error all the way up the call stack then error will seem inferior to panic, but this is due to not addressing issues when they happen. So when to use panic? When your code is on a collision course to crash and you cannot assume your way out of it. Another is when the code assumes something that is no longer true and having to check the integrity in every function from here on out would be tedious (and might impact performance). Still, you would use panic() only to get out of the layers of uncertainty... then still handle errors: func ForEach(iterable interface{}, f interface{}) error { defer func() { if r := recover(); r != nil { err = fmt.Errorf("cannot iterate due to unexpected runtime error %v", r) return } }() ... // perhaps a broken pipe in a global var // or an included module threw a panic at you! } But if you are still not convinced... Here is the Go FAQ We believe that coupling exceptions to a control structure, as in the try-catch-finally idiom, results in convoluted code. It also tends to encourage programmers to label too many ordinary errors, such as failing to open a file, as exceptional. Go takes a different approach. For plain error handling, Go's multi-value returns make it easy to report an error without overloading the return value. A canonical error type, coupled with Go's other features, makes error handling pleasant but quite different from that in other languages. A: A panic typically means something went unexpectedly wrong. 
Mostly used to fail fast on errors that shouldn’t occur during normal operation, or that we aren’t prepared to handle gracefully. So in this case just return the error, you don't want your program to panic. A: I think none of the previous answers are correct: * *By default, if we don't know what to do with the "error" code must panic following best programming patterns: https://en.wikipedia.org/wiki/Fail-fast Putting it more formally, our "Turing Machine" is broken and we need to come back to an "stable state" or "reset state". More info at https://en.wikipedia.org/wiki/Reset_(computing) For example in web (micro)services that means returning a 40X error (panic caused by input from user) or 50X error (panic caused by something else - hardware, network, assert error, ...) * *If we know what to do with the "error", then we do not have an error in first place, but an uncomfortable return value. This is a normal execution condition and probably not an error. Normally this correspond to the happy vs non-happy path modeling. In a summary, the err return value is mostly a wrong idea, even if the GO community has adopted it as a religion. Using error return values is just a patchy way to speed up program execution since it require fewer CPU instructions to be implemented, but most of the time, except for low-level services, it is useless and promote dirty code. (note that GO was designed to implement those low-level services as an "easy-C", but it was adopted for high-level (Level 7) application programs when an error must fail fast to avoid continuing with undefined states that can potentially cause money being lost of fatal casualties. In case of doubt, default to panic. A: Don't use panic for normal error handling. Use error and multiple return values. See https://golang.org/doc/effective_go.html#errors.
stackoverflow
{ "language": "en", "length": 1490, "provenance": "stackexchange_0000F.jsonl.gz:852691", "question_score": "54", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504354" }
9c416a4c96aa4dda2cbdcceaadb147373397bfea
Stackoverflow Stackexchange Q: Swift: Nested optionals in a single guard statement I am trying to guard a conversion from string to Float to Int: guard let v = Int (Float("x")) else { return -1 } The swift 3 compiler complains: value of optional type 'Float?' not unwrapped; did you mean to use '!' or '?'? Adding "?" does not help, though. And "!" would be wrong here, wouldn't it? Is it possible to solve this, without having to use two lines or two guard statements? A: Optional has a map function made just for this: guard let v = Float("x").map(Int.init) else { return nil }
Q: Swift: Nested optionals in a single guard statement I am trying to guard a conversion from string to Float to Int: guard let v = Int (Float("x")) else { return -1 } The swift 3 compiler complains: value of optional type 'Float?' not unwrapped; did you mean to use '!' or '?'? Adding "?" does not help, though. And "!" would be wrong here, wouldn't it? Is it possible to solve this, without having to use two lines or two guard statements? A: Optional has a map function made just for this: guard let v = Float("x").map(Int.init) else { return nil } A: You can do it with one guard statement with an intermediate variable: guard let f = Float("x"), case let v = Int(f) else { return } Note: The case is there as a workaround for the fact that Int(f) does not return an optional value. (Thanks for the idea, @Hamish)
stackoverflow
{ "language": "en", "length": 154, "provenance": "stackexchange_0000F.jsonl.gz:852757", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504563" }
f3dfc0dd995ab205b5f0d5f1f34dacd34f5991d2
Stackoverflow Stackexchange Q: Git log showing password in author field I can see my Git (Bitbucket) password when I run the git log command locally. commit af45fb439beb489f473b6e935dfc..... Author: username <Password> Date: Sat Jun 10 15:32:30 2017 +0530 .... How can I remove the password from the author field for the next check-in as well as historical check-ins? A: You'll have to rewrite the history of the branch, setting up the right author. https://help.github.com/articles/changing-author-info/
Q: Git log showing password in author field I can see my Git (Bitbucket) password when I run the git log command locally. commit af45fb439beb489f473b6e935dfc..... Author: username <Password> Date: Sat Jun 10 15:32:30 2017 +0530 .... How can I remove the password from the author field for the next check-in as well as historical check-ins? A: You'll have to rewrite the history of the branch, setting up the right author. https://help.github.com/articles/changing-author-info/ A: This problem is about your git config. You can try to view your git config: git config --list So, you can modify your gitconfig file to fix this problem: vim ~/.gitconfig A: To reset your username in a commit: git rebase -i <commit-id of the previous commit> Once the editor opens, change the pick in front of the commit to be edited to edit. Save and exit. Then, git commit --amend --author="Author name <email>" git rebase --continue A: Ran into the same problem. This link is quite helpful: change author name If you want to change the author name only for the last commit, do git commit --amend --author="name <email>" and you should be good. Changing author info for multiple older commits can be done using git rebase. Refer to the above link for exact steps. Also, edit the author name in your git config so that future commits will have the correct info. vi ~/.gitconfig and edit the username field.
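For the historical commits, the linked changing-author-info article boils down to a filter-branch script along these lines. This is only a sketch with placeholder values: the matched email ("Password") and the replacement name/address must be adapted to the actual repository, and since this rewrites history it needs a force push and coordination with anyone else using the repo.

```bash
# Sketch (placeholder values): rewrite author/committer info across all branches and tags.
git filter-branch --env-filter '
if [ "$GIT_AUTHOR_EMAIL" = "Password" ]; then
    export GIT_AUTHOR_NAME="username"
    export GIT_AUTHOR_EMAIL="you@example.com"
fi
if [ "$GIT_COMMITTER_EMAIL" = "Password" ]; then
    export GIT_COMMITTER_NAME="username"
    export GIT_COMMITTER_EMAIL="you@example.com"
fi
' --tag-name-filter cat -- --branches --tags

# Then publish the rewritten history (assumes a remote named origin):
# git push --force --tags origin 'refs/heads/*'
```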
stackoverflow
{ "language": "en", "length": 234, "provenance": "stackexchange_0000F.jsonl.gz:852772", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504602" }
17b3dc7105f3f46281cf4e11fe1792ef2c9f079a
Stackoverflow Stackexchange Q: ScheduledExecutorService executes with a different delay every time I'm encountering a very strange problem on my server when executing some jobs. My code: @WebListener public class ReportingScheduler implements ServletContextListener { private ScheduledExecutorService scheduler; @Override public void contextInitialized(ServletContextEvent event) { scheduler = Executors.newSingleThreadScheduledExecutor(); scheduler.scheduleAtFixedRate(new ReportingJob(), 0, 10, TimeUnit.MINUTES); } @Override public void contextDestroyed(ServletContextEvent event) { scheduler.shutdownNow(); } The method run() inside ReportingJob() is only printing. You can see my schedule is supposed to run every 10 minutes. I'm running this on Tomcat. On my localhost, everything is all good. But on my server (Ubuntu 16.04) the job executes a little earlier every time. I don't know why. Some of my logs on the Ubuntu server: 2017-06-12 16:15:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:24:57 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:34:27 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:43:58 INFO ReportingJob:49 - START 10min JOB On my localhost: 2017-06-12 16:15:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:25:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:35:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:45:02 INFO ReportingJob:49 - START 10min JOB I already checked the server.xml config and it's the same.
Q: ScheduledExecutorService executes with a different delay every time I'm encountering a very strange problem on my server when executing some jobs. My code: @WebListener public class ReportingScheduler implements ServletContextListener { private ScheduledExecutorService scheduler; @Override public void contextInitialized(ServletContextEvent event) { scheduler = Executors.newSingleThreadScheduledExecutor(); scheduler.scheduleAtFixedRate(new ReportingJob(), 0, 10, TimeUnit.MINUTES); } @Override public void contextDestroyed(ServletContextEvent event) { scheduler.shutdownNow(); } The method run() inside ReportingJob() is only printing. You can see my schedule is supposed to run every 10 minutes. I'm running this on Tomcat. On my localhost, everything is all good. But on my server (Ubuntu 16.04) the job executes a little earlier every time. I don't know why. Some of my logs on the Ubuntu server: 2017-06-12 16:15:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:24:57 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:34:27 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:43:58 INFO ReportingJob:49 - START 10min JOB On my localhost: 2017-06-12 16:15:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:25:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:35:02 INFO ReportingJob:49 - START 10min JOB 2017-06-12 16:45:02 INFO ReportingJob:49 - START 10min JOB I already checked the server.xml config and it's the same.
stackoverflow
{ "language": "en", "length": 189, "provenance": "stackexchange_0000F.jsonl.gz:852795", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504699" }
b0f248355e94313bcc135c0ed627ede030476962
Stackoverflow Stackexchange Q: REST access to Spring Config Server from non-spring application Does anyone know if it's possible to use configuration values from the Spring config server via a REST interface? If so, is there any documentation on the interface? TIA. A: The official API doc is hosted on GitHub. I have used the REST API manually for testing purposes. I found this sample app to be useful. API Resources | Path | Description | | ----------------------------------- | ------------------------------------------------------------------- | | /{app}/{profile} | Configuration data for app in Spring profile (comma-separated). | | /{app}/{profile}/{label} | Add a git label | | /{app}/{profile}/{label}/{path} | An environment-specific plain text config file (at "path") |
Q: REST access to Spring Config Server from non-spring application Does anyone know if it's possible to use configuration values from the Spring config server via a REST interface? If so, is there any documentation on the interface? TIA. A: The official API doc is hosted on GitHub. I have used the REST API manually for testing purposes. I found this sample app to be useful. API Resources | Path | Description | | ----------------------------------- | ------------------------------------------------------------------- | | /{app}/{profile} | Configuration data for app in Spring profile (comma-separated). | | /{app}/{profile}/{label} | Add a git label | | /{app}/{profile}/{label}/{path} | An environment-specific plain text config file (at "path") |
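Those paths can be exercised with any HTTP client. A hedged example, assuming a config server running on its conventional port 8888 and an application named myapp (both of which are assumptions, not part of the question):

```bash
# Hypothetical calls against a config server at localhost:8888
curl http://localhost:8888/myapp/default              # property sources for app "myapp", profile "default" (JSON)
curl http://localhost:8888/myapp/dev/master           # same, pinned to the "master" label
curl http://localhost:8888/myapp/dev/master/app.yml   # a plain config file resolved at "path" (file name is a placeholder)
```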
stackoverflow
{ "language": "en", "length": 110, "provenance": "stackexchange_0000F.jsonl.gz:852800", "question_score": "12", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504710" }
6ce8e03571c009824136e6702ce1e2a7be893250
Stackoverflow Stackexchange Q: How do I get the number of distinct files in a directory in PowerShell? I currently have this code, which returns the total number of files, but I don't want files that share the same prefix to be counted more than once. Let's say I have this: 01Red.txt 01Blue.txt 02Red.txt 05Red.txt 05Green.txt Get-ChildItem -File *.txt -Path "C:\Users\Test\Desktop\TestDirectory" | Measure-Object | %{$_.Count} I want to return a total count of 3 based on 01,02,05 but with my code I get 5. How can I get it to return 3 and ignore everything past the first 2 characters in the string? A: I might suggest Group-Object: Get-ChildItem *.txt | Group-Object { $_.Name.Substring(0,2) } Add | Measure-Object to count the number of groupings (this would be 3 in your example).
Q: How do I get the number of distinct files in a directory in PowerShell? I currently have this code, which returns the total number of files, but I don't want files that share the same prefix to be counted more than once. Let's say I have this: 01Red.txt 01Blue.txt 02Red.txt 05Red.txt 05Green.txt Get-ChildItem -File *.txt -Path "C:\Users\Test\Desktop\TestDirectory" | Measure-Object | %{$_.Count} I want to return a total count of 3 based on 01,02,05 but with my code I get 5. How can I get it to return 3 and ignore everything past the first 2 characters in the string? A: I might suggest Group-Object: Get-ChildItem *.txt | Group-Object { $_.Name.Substring(0,2) } Add | Measure-Object to count the number of groupings (this would be 3 in your example). A: Get-ChildItem -File *.txt -Path "C:\Users\Test\Desktop\TestDirectory" | select {$_.BaseName.Substring(0,2)} | Get-Unique -AsString | measure
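Putting the two parts of the first answer together, the whole thing can be written as one expression; the path and the two-character prefix are taken from the example above:

```powershell
# Count distinct two-character prefixes (3 for 01Red, 01Blue, 02Red, 05Red, 05Green)
$distinct = (Get-ChildItem -File *.txt -Path "C:\Users\Test\Desktop\TestDirectory" |
    Group-Object { $_.Name.Substring(0,2) } |
    Measure-Object).Count
$distinct
```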
stackoverflow
{ "language": "en", "length": 138, "provenance": "stackexchange_0000F.jsonl.gz:852803", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504720" }
2b12498512767088661a347183eb4519404a4e5d
Stackoverflow Stackexchange Q: How to set [name] of output.chunkFilename to be the same as [name] of output.filename? In webpack 2 I defined output like this: module.exports.output = { filename: `js/[name].js`, chunkFilename: `js/[id].[name].js`, } When devServer starts, the entry file is main.js as expected. However, the chunk file is requested as 0.0.js; what should I do if I want 0.main.js? Also, I'm curious why chunkFilename's [name] can't match filename's [name].
Q: How to set [name] of output.chunkFilename to be the same as [name] of output.filename? In webpack 2 I defined output like this: module.exports.output = { filename: `js/[name].js`, chunkFilename: `js/[id].[name].js`, } When devServer starts, the entry file is main.js as expected. However, the chunk file is requested as 0.0.js; what should I do if I want 0.main.js? Also, I'm curious why chunkFilename's [name] can't match filename's [name].
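A hedged note on the second part: an on-demand chunk only has a [name] if one is assigned where the split point is created; otherwise [name] falls back to the chunk id, which is why 0.0.js shows up. One way to name the chunk (a sketch assuming webpack 2.4+ and a dynamic import; the chunk and module names here are illustrative):

// Naming the async chunk makes chunkFilename's [name] resolve to "main-extra"
// instead of the numeric id.
import(/* webpackChunkName: "main-extra" */ './some-module').then(function (mod) {
  mod.default();
});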
stackoverflow
{ "language": "en", "length": 69, "provenance": "stackexchange_0000F.jsonl.gz:852821", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504783" }
29ffd81286c96c5d1f15ff72b72b9093f1f06150
Stackoverflow Stackexchange Q: ionic 3 - Runtime Error function not defined - ReferenceError: function is not defined at HTMLButtonElement.onclick I have a button for which I defined a doLogout function for the onclick property, but every time I click on that button the following error is shown: Runtime Error function not defined - ReferenceError: function is not defined at HTMLButtonElement.onclick The code is very simple, and I used it in other pages, where the function is called correctly. This is my HTML file: <ion-header> <ion-navbar> <ion-title>Logout</ion-title> </ion-navbar> </ion-header> <ion-content> <div padding> <button ion-button block onclick="doLogout()">Logout</button> </div> </ion-content> And this is the ts file: export class LogoutPage { constructor(public navCtrl: NavController, public navParams: NavParams, public api : Api) { } doLogout(){ //does something } } A: Ionic2/3 is built on top of Angular2/4 (or just Angular) so the correct way to use the click event would be: <button ion-button block (click)="doLogout()">Logout</button> You can find more information in the Angular docs
Q: ionic 3 - Runtime Error function not defined - ReferenceError: function is not defined at HTMLButtonElement.onclick I have a button for which I defined a doLogout function for the onclick property, but every time I click on that button the following error is shown: Runtime Error function not defined - ReferenceError: function is not defined at HTMLButtonElement.onclick The code is very simple, and I used it in other pages, where the function is called correctly. This is my HTML file: <ion-header> <ion-navbar> <ion-title>Logout</ion-title> </ion-navbar> </ion-header> <ion-content> <div padding> <button ion-button block onclick="doLogout()">Logout</button> </div> </ion-content> And this is the ts file: export class LogoutPage { constructor(public navCtrl: NavController, public navParams: NavParams, public api : Api) { } doLogout(){ //does something } } A: Ionic2/3 is built on top of Angular2/4 (or just Angular) so the correct way to use the click event would be: <button ion-button block (click)="doLogout()">Logout</button> You can find more information in the Angular docs
stackoverflow
{ "language": "en", "length": 155, "provenance": "stackexchange_0000F.jsonl.gz:852822", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504785" }
17f395499de4842f9f59e906282647f9b4338ebd
Stackoverflow Stackexchange Q: How to stop robocopy from exiting the build? I'm using Gitlab 8.15.4 and the latest runner for that build. Because of our firewall I can't run npm install so I'm copying the node-modules from another location into the build folder. The runner is on a Windows 7 machine. My first attempt: (.gitlab-ci.yml) before_script: - robocopy S:\Storage\GitLab-Runner\Assets\node_modules .\node_modules /s build: stage: build script: - echo starting - gulp - echo done artifacts: paths: - deploy.zip Fails the build with the error: ERROR: Job failed: exit status 1 My second (nth) try puts the robocopy into a script file and executes it from there: (.gitlab-ci.yml) before_script: - S:\Storage\GitLab-Runner\Scripts\CopyAssets.bat build: stage: build script: - echo starting - gulp - echo done artifacts: paths: - deploy.zip (CopyAssets.bat) robocopy S:\Storage\GitLab-Runner\Assets\node_modules .\node_modules /s set/A errlev="%ERRORLEVEL% & 24" exit/B %errlev% Passes but does not execute any other steps. How can I prevent RoboCopy from exiting the build when it finishes? A: For powershell users: (robocopy src dst) ; if ($lastexitcode -lt 8) { $global:LASTEXITCODE = $null } or cmd /c (robocopy src dst) ^& IF %ERRORLEVEL% LEQ 1 exit 0 I have tested on GitLab 13.12 with GitLab Runner powershell and it worked well.
Q: How to stop robocopy from exiting the build? I'm using Gitlab 8.15.4 and the latest runner for that build. Because of our firewall I can't run npm install so I'm copying the node-modules from another location into the build folder. The runner is on a Windows 7 machine. My first attempt: (.gitlab-ci.yml) before_script: - robocopy S:\Storage\GitLab-Runner\Assets\node_modules .\node_modules /s build: stage: build script: - echo starting - gulp - echo done artifacts: paths: - deploy.zip Fails the build with the error: ERROR: Job failed: exit status 1 My second (nth) try puts the robocopy into a script file and executes it from there: (.gitlab-ci.yml) before_script: - S:\Storage\GitLab-Runner\Scripts\CopyAssets.bat build: stage: build script: - echo starting - gulp - echo done artifacts: paths: - deploy.zip (CopyAssets.bat) robocopy S:\Storage\GitLab-Runner\Assets\node_modules .\node_modules /s set/A errlev="%ERRORLEVEL% & 24" exit/B %errlev% Passes but does not execute any other steps. How can I prevent RoboCopy from exiting the build when it finishes? A: For powershell users: (robocopy src dst) ; if ($lastexitcode -lt 8) { $global:LASTEXITCODE = $null } or cmd /c (robocopy src dst) ^& IF %ERRORLEVEL% LEQ 1 exit 0 I have tested on GitLab 13.12 with GitLab Runner powershell and it worked well. A: You and a lot of other people have encountered this issue with robocopy in CI deployment. As I have found this question being unanswered for some time and the other answers being incompatible with continuing the script after robocopy, I want to share the solution here. If you want robocopy to ignore all return codes under 8 (>= 8 means copy error), you need a condition that follows the command directly and changes the error level. (robocopy src dst) ^& IF %ERRORLEVEL% LSS 8 SET ERRORLEVEL = 0
stackoverflow
{ "language": "en", "length": 287, "provenance": "stackexchange_0000F.jsonl.gz:852829", "question_score": "9", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504795" }
57dd86113727a199bf4bec401046b96e319ac747
Stackoverflow Stackexchange Q: Including the decorator in the documentation For this code: @my_decorator(arg='abc') def my_function(x, y): """ Return the sum of x and y""" I'd like to automatically generate the documentation that shows not just the original (pre-decorator) function, but also the decorator itself. So something like this: @my_decorator(arg='abc') my_function(x, y)   Return the sum of x and y Preserving the signature of the original function is not a problem (using either functools.wraps or the decorator library). But how can I get Sphinx to include the decorator itself? Given that Sphinx autofunction looks at each function at runtime (after importing the module), the decorator was already applied, so it seems the information about the decorator is already irretrievably lost? Perhaps there's a plugin that can grab the decorator information from the static source file and combine it with whatever Sphinx generated? I have control over the decorator, if that helps.
Q: Including the decorator in the documentation For this code: @my_decorator(arg='abc') def my_function(x, y): """ Return the sum of x and y""" I'd like to automatically generate the documentation that shows not just the original (pre-decorator) function, but also the decorator itself. So something like this: @my_decorator(arg='abc') my_function(x, y)   Return the sum of x and y Preserving the signature of the original function is not a problem (using either functools.wraps or the decorator library). But how can I get Sphinx to include the decorator itself? Given that Sphinx autofunction looks at each function at runtime (after importing the module), the decorator was already applied, so it seems the information about the decorator is already irretrievably lost? Perhaps there's a plugin that can grab the decorator information from the static source file and combine it with whatever Sphinx generated? I have control over the decorator, if that helps.
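Since the question says the decorator is under the author's control, one hedged sketch (all names here are illustrative, not from the original post) is to have the decorator record how it was applied and surface that text through Sphinx's autodoc-process-docstring event:

import functools

def my_decorator(arg):
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        # Remember how the decorator was applied so documentation tooling
        # can read it back later.
        wrapper._decorator_repr = "@my_decorator(arg={!r})".format(arg)
        return wrapper
    return decorate

# In Sphinx's conf.py: prepend the recorded decorator line to the docstring.
def process_docstring(app, what, name, obj, options, lines):
    deco = getattr(obj, "_decorator_repr", None)
    if deco:
        lines.insert(0, "")
        lines.insert(0, "Decorated with ``{}``.".format(deco))

def setup(app):
    app.connect("autodoc-process-docstring", process_docstring)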
stackoverflow
{ "language": "en", "length": 146, "provenance": "stackexchange_0000F.jsonl.gz:852840", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504827" }
8e16f644acc2f23bba3822c7209b38e2ef5b00b6
Stackoverflow Stackexchange Q: Using OR r/m32, imm32 in NASM The opcode generated by: or ebx, 0ffffffffh with NASM is: 83CBFF But in the Intel instruction manual: 81 /1 id OR r/m32, imm32 83 /1 ib OR r/m32, imm8 My question is, why did NASM use opcode 83 instead of 81, and how do I generate opcode 81? This is the command line for NASM: nasm -fwin32 file.asm -l list.lst A: NASM picks the sign-extended 8-bit immediate form (opcode 83) as an optimization, because it does the same thing and takes less space. You can force NASM to emit the full 32-bit immediate with: or ebx, strict dword 0ffffffffh This results in: 81 cb ff ff ff ff Assembling the original code without optimizations (nasm -O0) will also give this result. Note that if the register is EAX, doing this will result in the 0D opcode (or eax, imm32) instead of 81. So in that case you might have to output the instruction yourself: db 0x81, 0xc8, 0xff, 0xff, 0xff, 0xff.
Q: Using OR r/m32, imm32 in NASM The opcode generated by: or ebx, 0ffffffffh with NASM is: 83CBFF But in the Intel instruction manual: 81 /1 id OR r/m32, imm32 83 /1 ib OR r/m32, imm8 My question is, why did NASM use opcode 83 instead of 81, and how do I generate opcode 81? This is the command line for NASM: nasm -fwin32 file.asm -l list.lst A: NASM picks the sign-extended 8-bit immediate form (opcode 83) as an optimization, because it does the same thing and takes less space. You can force NASM to emit the full 32-bit immediate with: or ebx, strict dword 0ffffffffh This results in: 81 cb ff ff ff ff Assembling the original code without optimizations (nasm -O0) will also give this result. Note that if the register is EAX, doing this will result in the 0D opcode (or eax, imm32) instead of 81. So in that case you might have to output the instruction yourself: db 0x81, 0xc8, 0xff, 0xff, 0xff, 0xff.
stackoverflow
{ "language": "en", "length": 164, "provenance": "stackexchange_0000F.jsonl.gz:852878", "question_score": "8", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504943" }
75700ecf6794ba5046e8e536d94595da907338d1
Stackoverflow Stackexchange Q: Espresso Testing: programmatically select 'never' on smart lock save password Doing Espresso UI testing on Android and trying to bypass the Smart Lock screen. The issue is that we are testing in Firebase, and I don't believe we can turn off Smart Lock on the devices as a whole since they are in the cloud. The popup is also not a part of the app itself, so I can't get the id using Stetho or Layout Inspector. I believe it's another app (or OS feature) drawing on top of our app. How can I hit the "no thanks" or "never" button in Espresso? A: You can use UiAutomator. Gradle: androidTestCompile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.3' And add this to your code: waitSeconds(10) // Waiting for smartlock window val uiDevice = UiDevice.getInstance(InstrumentationRegistry.getInstrumentation()) val currentPackageName = uiDevice.currentPackageName if (currentPackageName != "your.app.package") { uiDevice.pressBack() }
Q: Espresso Testing: programmatically select 'never' on smart lock save password Doing Espresso UI testing on Android and trying to bypass the Smart Lock screen. The issue is that we are testing in Firebase, and I don't believe we can turn off Smart Lock on the devices as a whole since they are in the cloud. The popup is also not a part of the app itself, so I can't get the id using Stetho or Layout Inspector. I believe it's another app (or OS feature) drawing on top of our app. How can I hit the "no thanks" or "never" button in Espresso? A: You can use UiAutomator. Gradle: androidTestCompile 'com.android.support.test.uiautomator:uiautomator-v18:2.1.3' And add this to your code: waitSeconds(10) // Waiting for smartlock window val uiDevice = UiDevice.getInstance(InstrumentationRegistry.getInstrumentation()) val currentPackageName = uiDevice.currentPackageName if (currentPackageName != "your.app.package") { uiDevice.pressBack() }
stackoverflow
{ "language": "en", "length": 137, "provenance": "stackexchange_0000F.jsonl.gz:852887", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44504969" }
abcdb60bea9c46f390ae331b5666c624a9228e67
Stackoverflow Stackexchange Q: Poor man's alternative to _mm_cvttpd_epi64 On AVX512DQ, there is _mm_cvttpd_epi64; for example, in the file avx512vldqintrin.h we find static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi64 (__m128d __A) { return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, (__v2di) _mm_setzero_si128(), (__mmask8) -1); } which converts two packed 64-bit floats (__m128d) to two packed 64-bit integers (__m128i). There is also _mm256_cvttpd_epi64 for converting four packed 64-bit floats (__m256d) to four packed 64-bit integers (__m256i). However, many machines do not support AVX512DQ. So I wonder what the best version of a poor man's alternative for this is. I should say that I'm already happy with a solution that works only for 64-bit floats which can be losslessly converted to 32-bit floats.
Q: Poor man's alternative to _mm_cvttpd_epi64 On AVX512DQ, there is _mm_cvttpd_epi64; for example, in the file avx512vldqintrin.h we find static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi64 (__m128d __A) { return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, (__v2di) _mm_setzero_si128(), (__mmask8) -1); } which converts two packed 64-bit floats (__m128d) to two packed 64-bit integers (__m128i). There is also _mm256_cvttpd_epi64 for converting four packed 64-bit floats (__m256d) to four packed 64-bit integers (__m256i). However, many machines do not support AVX512DQ. So I wonder what the best version of a poor man's alternative for this is. I should say that I'm already happy with a solution that works only for 64-bit floats which can be losslessly converted to 32-bit floats.
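One common poor man's workaround, valid only when the truncated values are also known to fit in 32-bit integer range (a stronger assumption than the float32 condition stated above), is to truncate to int32 and sign-extend with SSE4.1:

#include <emmintrin.h>  /* SSE2: _mm_cvttpd_epi32 */
#include <smmintrin.h>  /* SSE4.1: _mm_cvtepi32_epi64 */

/* Sketch: convert two doubles to two int64, assuming int32 range suffices. */
static inline __m128i cvttpd_epi64_via_epi32(__m128d x)
{
    __m128i lo32 = _mm_cvttpd_epi32(x);   /* two doubles -> two int32 in the low half */
    return _mm_cvtepi32_epi64(lo32);      /* sign-extend to two int64 */
}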
stackoverflow
{ "language": "en", "length": 112, "provenance": "stackexchange_0000F.jsonl.gz:853000", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505316" }
34a68375d613bf36a8b1b63dffa37a98204df2d5
Stackoverflow Stackexchange Q: Access an ElementRef inside an ng-template contained in a directive In one of my component's HTML templates, we use some <ng-template> tags. Inside these tags, we would like to define a reference to access the nativeElement of the div from a @ViewChild. The ElementRef I want to recover is inside the tabset directive, within an ng-template of the nested p-dataTable directive. Here is our component.html code (we use primeNG and ngx-bootstrap modules and tabs / datatable directives): <tabset #staticTabs> <tab> <ng-template tabHeading> <span class="hidden-sm-down"> Tab Header Text </span> </ng-template> <p-dataTable [value]="data"> <p-column field="id" header="Id"></p-column> <p-column field="context" header="Context"> <ng-template let-context="rowData" pTemplate="body"> <div class="container" #iWantThisElementNative></div> </ng-template> </p-column> </p-dataTable> </tab> </tabset> And our component.ts: export class AppComponent implements OnInit, AfterViewInit { @ViewChild('iWantThisElementNative') iWantThisElementNative; ... ngAfterViewInit() { console.log(this.iWantThisElementNative.nativeElement); } } And the error we encounter: ERROR Error: Uncaught (in promise): TypeError: Cannot read property 'nativeElement' of undefined I tried to move the div with the reference above the tabset, and it works properly. I tried to move the div outside of the ng-template and it works too. How can I access an ElementRef which is placed inside custom directives, within <ng-template>?
Q: Access an ElementRef inside an ng-template contained in a directive In one of my component's HTML templates, we use some <ng-template> tags. Inside these tags, we would like to define a reference to access the nativeElement of the div from a @ViewChild. The ElementRef I want to recover is inside the tabset directive, within an ng-template of the nested p-dataTable directive. Here is our component.html code (we use primeNG and ngx-bootstrap modules and tabs / datatable directives): <tabset #staticTabs> <tab> <ng-template tabHeading> <span class="hidden-sm-down"> Tab Header Text </span> </ng-template> <p-dataTable [value]="data"> <p-column field="id" header="Id"></p-column> <p-column field="context" header="Context"> <ng-template let-context="rowData" pTemplate="body"> <div class="container" #iWantThisElementNative></div> </ng-template> </p-column> </p-dataTable> </tab> </tabset> And our component.ts: export class AppComponent implements OnInit, AfterViewInit { @ViewChild('iWantThisElementNative') iWantThisElementNative; ... ngAfterViewInit() { console.log(this.iWantThisElementNative.nativeElement); } } And the error we encounter: ERROR Error: Uncaught (in promise): TypeError: Cannot read property 'nativeElement' of undefined I tried to move the div with the reference above the tabset, and it works properly. I tried to move the div outside of the ng-template and it works too. How can I access an ElementRef which is placed inside custom directives, within <ng-template>?
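A hedged sketch of one direction sometimes tried in this situation (names follow the question; whether it works here depends on when and where p-dataTable stamps out the row template): because the template content is instantiated after the parent view is initialized, the reference is not yet available in ngAfterViewInit, so query with @ViewChildren and react to its changes stream instead of reading a single @ViewChild once.

import { AfterViewInit, ElementRef, QueryList, ViewChildren } from '@angular/core';

export class AppComponent implements AfterViewInit {
  @ViewChildren('iWantThisElementNative') divs: QueryList<ElementRef>;

  ngAfterViewInit() {
    // Fires whenever the templated divs are (re)created by the datatable.
    this.divs.changes.subscribe((list: QueryList<ElementRef>) => {
      list.forEach(d => console.log(d.nativeElement));
    });
  }
}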
stackoverflow
{ "language": "en", "length": 184, "provenance": "stackexchange_0000F.jsonl.gz:853020", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505388" }
273aa48e15b194cec89f9d8802292d512f8335d8
Stackoverflow Stackexchange Q: Use of @objc inference in Swift 4 I am getting this warning: Argument of '#selector' refers to instance method 'customButtonDidTouch()' in 'CustomTableViewController' that depends on '@objc' attribute inference deprecated in Swift 4 Xcode suggests adding @objc to my customButtonDidTouch() method, but doing so yields this other warning: The use of Swift 3 @objc inference in Swift 4 mode is deprecated. Please address deprecated @objc inference warnings, test your code with “Use of deprecated Swift 3 @objc inference” logging enabled, and disable Swift 3 @objc inference. Any suggestions?
Q: Use of @objc inference in Swift 4 I am getting this warning: Argument of '#selector' refers to instance method 'customButtonDidTouch()' in 'CustomTableViewController' that depends on '@objc' attribute inference deprecated in Swift 4 Xcode suggests adding @objc to my customButtonDidTouch() method, but doing so yields this other warning: The use of Swift 3 @objc inference in Swift 4 mode is deprecated. Please address deprecated @objc inference warnings, test your code with “Use of deprecated Swift 3 @objc inference” logging enabled, and disable Swift 3 @objc inference. Any suggestions?
stackoverflow
{ "language": "en", "length": 88, "provenance": "stackexchange_0000F.jsonl.gz:853021", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505392" }
b220444b4040ba1dab82aced0355df6d2bb8f56e
Stackoverflow Stackexchange Q: Checking if an environment variable exists and is set to True So, I want to check and verify if a given variable "abc" exists and that it's true. If the variable exists and is False, then I want it to go to else. Here is how I got it to work in Python: env = os.environ.copy() if "abc" in env and env['abc'] == "True": print "Works" else: print "Doesn't work" Is there a better way to do it? A: You can check to see if the variable is in the dictionaries returned by globals() and locals(). (Thank you to Aaron for reminding me to add the full code) For a local variable: if locals().get('abc'): print(abc) For a global variable: if globals().get('abc'): print(abc) For an environment variable: if os.environ.get('abc')=='True': #abc is set to True More information here: https://docs.python.org/3/library/functions.html#locals https://docs.python.org/3/library/functions.html#globals
Q: Checking if an environment variable exists and is set to True So, I want to check and verify if a given variable "abc" exists and that it's true. If the variable exists and is False, then I want it to go to else. Here is how I got it to work in Python: env = os.environ.copy() if "abc" in env and env['abc'] == "True": print "Works" else: print "Doesn't work" Is there a better way to do it? A: You can check to see if the variable is in the dictionaries returned by globals() and locals(). (Thank you to Aaron for reminding me to add the full code) For a local variable: if locals().get('abc'): print(abc) For a global variable: if globals().get('abc'): print(abc) For an environment variable: if os.environ.get('abc')=='True': #abc is set to True More information here: https://docs.python.org/3/library/functions.html#locals https://docs.python.org/3/library/functions.html#globals A: You can use: env.get("abc", False) False is the default value if "abc" is not in env. A: You could use a Try Except Block. try: # Try calling ABC here any way you like # Here I am just printing it print(abc) except NameError: print("Variable ABC does not exist") A: It's enough to get it from env, I think: env.get('abc')
stackoverflow
{ "language": "en", "length": 198, "provenance": "stackexchange_0000F.jsonl.gz:853042", "question_score": "9", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505457" }
e0dc44bdc6a1b7c37da40c691273827eec1f1a7f
Stackoverflow Stackexchange Q: How to configure a non-default serviceAccount on a deployment My understanding of this doc page is that I can configure service accounts with Pods and hopefully also deployments, so I can access the k8s API in Kubernetes 1.6+. In order not to alter or use the default one, I want to create a service account and mount its certificate into the pods of a deployment. How do I achieve something similar to this example, but for a deployment? apiVersion: v1 kind: Pod metadata: name: my-pod spec: serviceAccountName: build-robot automountServiceAccountToken: false A: As you will need to specify 'podSpec' in Deployment as well, you should be able to configure the service account in the same way. Something like: apiVersion: extensions/v1beta1 kind: Deployment metadata: name: my-deployment spec: template: # Below is the podSpec. metadata: name: ... spec: serviceAccountName: build-robot automountServiceAccountToken: false ...
Q: How to configure a non-default serviceAccount on a deployment My understanding of this doc page is that I can configure service accounts with Pods and hopefully also deployments, so I can access the k8s API in Kubernetes 1.6+. In order not to alter or use the default one, I want to create a service account and mount its certificate into the pods of a deployment. How do I achieve something similar to this example, but for a deployment? apiVersion: v1 kind: Pod metadata: name: my-pod spec: serviceAccountName: build-robot automountServiceAccountToken: false A: As you will need to specify 'podSpec' in Deployment as well, you should be able to configure the service account in the same way. Something like: apiVersion: extensions/v1beta1 kind: Deployment metadata: name: my-deployment spec: template: # Below is the podSpec. metadata: name: ... spec: serviceAccountName: build-robot automountServiceAccountToken: false ... A: A Kubernetes nginx-deployment.yaml where serviceAccountName: test-sa is used as a non-default service account. Link: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ test-sa.yaml apiVersion: v1 kind: ServiceAccount metadata: name: test-sa namespace: test-ns nginx-deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: name: nginx namespace: test-ns spec: strategy: type: Recreate selector: matchLabels: app: nginx replicas: 1 # tells deployment to run 1 pod matching the template template: # create pods using pod definition in this template metadata: labels: app: nginx spec: serviceAccountName: test-sa containers: - name: nginx image: nginx ports: - containerPort: 80
stackoverflow
{ "language": "en", "length": 220, "provenance": "stackexchange_0000F.jsonl.gz:853044", "question_score": "58", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505461" }
d27ca1a961f0d7027cde471df19a99c48100cc4c
Stackoverflow Stackexchange Q: How to filter certain types of exceptions in logback I used some exceptions to handle some validation logic. The thing is that Spring Boot (1.5) with Logback logs all exceptions by default. Can you help me with the properties to add in logback.xml? A: Add a filter with an expression in your Logback XML; an example is below. Apart from this, you have to add the Janino dependency to your pom.xml <!-- logback --> <dependency> <groupId>org.codehaus.janino</groupId> <artifactId>janino</artifactId> <version>2.5.16</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>1.1.2</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>1.1.2</version> </dependency> logback.xml <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{5} - %msg%n </pattern> </encoder> </appender> <appender name="test" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{35} - %msg%n</pattern> </encoder> <filter class="ch.qos.logback.core.filter.EvaluatorFilter"> <evaluator> <expression>return (message.contains("javax.management.InstanceAlreadyExistsException")); </expression> </evaluator> <OnMismatch>NEUTRAL</OnMismatch> <OnMatch>DENY</OnMatch> </filter> </appender> <logger name="com.logbacke.example" level="ERROR" additivity="false"> <appender-ref ref="test" /> </logger> <!-- By default, the level of the root level is set to DEBUG --> <root level="DEBUG"> <appender-ref ref="STDOUT" /> </root>
Q: How to filter certain types of exceptions in logback I used some exceptions to handle some validation logic. The thing is that Spring Boot (1.5) with Logback logs all exceptions by default. Can you help me with the properties to add in logback.xml? A: Add a filter with an expression in your Logback XML; an example is below. Apart from this, you have to add the Janino dependency to your pom.xml <!-- logback --> <dependency> <groupId>org.codehaus.janino</groupId> <artifactId>janino</artifactId> <version>2.5.16</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>1.1.2</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>1.1.2</version> </dependency> logback.xml <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{5} - %msg%n </pattern> </encoder> </appender> <appender name="test" class="ch.qos.logback.core.ConsoleAppender"> <encoder> <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{35} - %msg%n</pattern> </encoder> <filter class="ch.qos.logback.core.filter.EvaluatorFilter"> <evaluator> <expression>return (message.contains("javax.management.InstanceAlreadyExistsException")); </expression> </evaluator> <OnMismatch>NEUTRAL</OnMismatch> <OnMatch>DENY</OnMatch> </filter> </appender> <logger name="com.logbacke.example" level="ERROR" additivity="false"> <appender-ref ref="test" /> </logger> <!-- By default, the level of the root level is set to DEBUG --> <root level="DEBUG"> <appender-ref ref="STDOUT" /> </root>
stackoverflow
{ "language": "en", "length": 156, "provenance": "stackexchange_0000F.jsonl.gz:853046", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505467" }
50f9355ef530f86410a95859a1eacc125b0fdfb5
Stackoverflow Stackexchange Q: How to make a circular kernel? For the Code below, I am wondering how to make a circular kernel instead of a rectangular one. I am currently looking at something circular, and I want to find the BGR average values for it. By adjusting my kernel, my data will be more accurate. for center in c_1: b = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 0] g = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 1] r = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 2] A: From: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html We manually created a structuring elements in the previous examples with help of Numpy. It is rectangular shape. But in some cases, you may need elliptical/circular shaped kernels. So for this purpose, OpenCV has a function, cv2.getStructuringElement(). You just pass the shape and size of the kernel, you get the desired kernel. # Elliptical Kernel >>> cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) array([[0, 0, 1, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]], dtype=uint8)
Q: How to make a circular kernel? For the Code below, I am wondering how to make a circular kernel instead of a rectangular one. I am currently looking at something circular, and I want to find the BGR average values for it. By adjusting my kernel, my data will be more accurate. for center in c_1: b = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 0] g = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 1] r = img2[center[0]-4: center[0]+5, center[1]-4: center[1]+5, 2] A: From: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html We manually created a structuring elements in the previous examples with help of Numpy. It is rectangular shape. But in some cases, you may need elliptical/circular shaped kernels. So for this purpose, OpenCV has a function, cv2.getStructuringElement(). You just pass the shape and size of the kernel, you get the desired kernel. # Elliptical Kernel >>> cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) array([[0, 0, 1, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]], dtype=uint8) A: To get the circle region for a given center, you could try the following function: def circleAverage(center, r = 4): """ """ for i in range(center[0]-r, center[0]+r): for j in range(center[1]-r, center[1] + r): if (center[0] - i) ** 2 + (center[1] - j) ** 2 <= r**2: pass  # do your computation here. Hope this helps you. A: Came here to find how to make a circular (symmetric) kernel. Ended up with my own implementation. import numpy as np def get_circular_kernel(diameter): mid = (diameter - 1) / 2 distances = np.indices((diameter, diameter)) - np.array([mid, mid])[:, None, None] kernel = ((np.linalg.norm(distances, axis=0) - mid) <= 0).astype(int) return kernel Note that for low diameters, behavior is perhaps unexpected. Variable mid when used for the second time can for example be replaced by diameter / 2. A: I've implemented it in the following way: r = 16 kernel = np.fromfunction(lambda x, y: ((x-r)**2 + (y-r)**2 <= r**2)*1, (2*r+1, 2*r+1), dtype=int).astype(np.uint8) Extra type conversion is needed to avoid overflow.
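Tying the elliptical kernel back to the original goal of averaging BGR values, here is a sketch reusing the question's img2 and c_1; the radius is illustrative, and it assumes every center lies far enough from the image border for the patch to be full-sized:

import cv2

radius = 4
size = 2 * radius + 1
# Non-zero entries of the elliptical kernel act as the mask for cv2.mean.
mask = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
for center in c_1:
    patch = img2[center[0] - radius: center[0] + radius + 1,
                 center[1] - radius: center[1] + radius + 1]
    b_mean, g_mean, r_mean, _ = cv2.mean(patch, mask=mask)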
stackoverflow
{ "language": "en", "length": 329, "provenance": "stackexchange_0000F.jsonl.gz:853054", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505504" }
71699b4f123e409aa089ebcc0b9e97ae0f8d8c51
Stackoverflow Stackexchange Q: What's the difference between stub.Predict.future and stub.Predict in tensorflow serving examples? I'm checking tf-serving examples and found that inception_client.py used result = stub.Predict(request, 10.0) while mnist_client.py used result_future = stub.Predict.future(request, 5.0). What's the difference? A: Note that the names used to describe the return values of the two calls are different. stub.Predict(request, 10.0) makes a call that blocks until the RPC is complete and then returns the single response message of the RPC. stub.Predict.future(request, 5.0) makes a call that immediately and without blocking returns a grpc.Future for the happening-asynchronously-in-the-background RPC.
Q: What's the difference between stub.Predict.future and stub.Predict in tensorflow serving examples? I'm checking tf-serving examples and found that inception_client.py used result = stub.Predict(request, 10.0) while mnist_client.py used result_future = stub.Predict.future(request, 5.0). What's the difference? A: Note that the names used to describe the return values of the two calls are different. stub.Predict(request, 10.0) makes a call that blocks until the RPC is complete and then returns the single response message of the RPC. stub.Predict.future(request, 5.0) makes a call that immediately and without blocking returns a grpc.Future for the happening-asynchronously-in-the-background RPC.
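A short sketch of how the future-returning form is typically consumed (handle_response is a hypothetical callback, not part of the example clients):

# Non-blocking call: returns a grpc.Future immediately.
result_future = stub.Predict.future(request, 5.0)

# Option 1: block later, when the result is actually needed.
result = result_future.result()

# Option 2: register a callback and never block this thread.
result_future.add_done_callback(lambda f: handle_response(f.result()))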
stackoverflow
{ "language": "en", "length": 91, "provenance": "stackexchange_0000F.jsonl.gz:853085", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505617" }
c56cf3cf05cb60c56f5aaf69ff32cbedcafab317
Stackoverflow Stackexchange Q: Localization - "error: read failed: The data couldn’t be read because it isn’t in the correct format" I'm getting: error: read failed: The data couldn’t be read because it isn’t in the correct format. When trying to run my localized file: "Hi" = "Hi translated" "Bye" = "Bye translated" Any ideas why? Only found other answers in Objective-C. A: Even though you're using Swift, which doesn't require ;, you still need to use semicolons in your localized files. The correct format is: "Hi" = "Hi translated"; "Bye" = "Bye translated";
Q: Localization - "error: read failed: The data couldn’t be read because it isn’t in the correct format" I´m getting: error: read failed: The data couldn’t be read because it isn’t in the correct format. When trying to run my localized file: "Hi" = "Hi translated" "Bye" = "Bye translated" Any ideas why? Only found other answers in Objective-C. A: Even though you´re using Swift which don´t require ;, you still need to use semicolon in your localized files. Correct format will be: "Hi" = "Hi translated"; "Bye" = "Bye translated"; A: Add ; at the end of each line "Hi" = "Hi translated"; "Bye" = "Bye translated";
stackoverflow
{ "language": "en", "length": 108, "provenance": "stackexchange_0000F.jsonl.gz:853097", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505641" }
e11a8fc9af9d75e873992cd15a2eb1659d5cd5c7
Stackoverflow Stackexchange Q: Difference in time measured by clock and steady_clock I was trying to measure the time taken to execute a specific function in my code. Initially I used the clock() function as below clock_t start = clock(); do_something(); clock_t end = clock(); printf("Time taken: %f ms\n", ((double) end - start)*1000/CLOCKS_PER_SEC); Later I was reading about the chrono library in C++11 and tried to measure the same with a std::chrono::steady_clock as below using namespace std::chrono; auto start = steady_clock::now(); do_something(); auto end = steady_clock::now(); printf("Time taken: %lld ms\n", duration_cast<milliseconds>(end - start).count()); The time measured by the first code snippet (using clock) was 89.53 ms and that measured by steady_clock was 1140 ms. Why is there such a big difference in time measured by both the clocks? A: clock measures processor time, whereas steady_clock measures physical time. So you can get differences like this if do_something() was preempted by other processes (such as checking mail or whatever). Daniel H makes a great point below in the comments that this can also happen if do_something() isn't CPU bound. For example if it sleeps, blocks on locking a mutex, waits on a condition variable, etc.
Q: Difference in time measured by clock and steady_clock I was trying to measure the time taken to execute a specific function in my code. Initially I used the clock() function as below clock_t start = clock(); do_something(); clock_t end = clock(); printf("Time taken: %f ms\n", ((double) end - start)*1000/CLOCKS_PER_SEC); Later I was reading about the chrono library in C++11 and tried to measure the same with a std::chrono::steady_clock as below using namespace std::chrono; auto start = steady_clock::now(); do_something(); auto end = steady_clock::now(); printf("Time taken: %lld ms\n", duration_cast<milliseconds>(end - start).count()); The time measured by the first code snippet (using clock) was 89.53 ms and that measured by steady_clock was 1140 ms. Why is there such a big difference in time measured by both the clocks? A: clock measures processor time, whereas steady_clock measures physical time. So you can get differences like this if do_something() was preempted by other processes (such as checking mail or whatever). Daniel H makes a great point below in the comments that this can also happen if do_something() isn't CPU bound. For example if it sleeps, blocks on locking a mutex, waits on a condition variable, etc.
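A minimal self-contained demo of the distinction (assuming a C++11 compiler): when the "work" is a sleep rather than computation, clock() barely advances while steady_clock records the full wall-clock second.

#include <chrono>
#include <cstdio>
#include <ctime>
#include <thread>

int main() {
    std::clock_t c0 = std::clock();
    auto t0 = std::chrono::steady_clock::now();

    std::this_thread::sleep_for(std::chrono::seconds(1));  // not CPU bound

    std::clock_t c1 = std::clock();
    auto t1 = std::chrono::steady_clock::now();

    std::printf("clock():      %.1f ms\n", (c1 - c0) * 1000.0 / CLOCKS_PER_SEC);
    std::printf("steady_clock: %lld ms\n",
                static_cast<long long>(
                    std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()));
}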
stackoverflow
{ "language": "en", "length": 191, "provenance": "stackexchange_0000F.jsonl.gz:853114", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505683" }
643a4727fdf4397bade57d33b927601cc07a5845
Stackoverflow Stackexchange Q: How to authenticate the GitHub API request to increase the rate limit? I am trying to extract data from the GitHub API (using the Python requests library) and facing challenges with authenticating my API request. I have tried multiple ways, including HTTP Basic authentication, HTTP Digest authentication and also OAuth authentication. Below is the code I used. DigestAuthentication: for i in range(0,2): api_url = re.sub("useri", ul[i], api_url_gen) res = requests.get(api_url, auth=HTTPDigestAuth('user', 'password')) json_content= json.loads(res.text) user_stats.append(json_content) print(i) Basic Authentication: for i in range(0,2): api_url = re.sub("useri", ul[i], api_url_gen) res = requests.get(api_url, auth=HTTPBasicAuth('user', 'password')) json_content= json.loads(res.text) user_stats.append(json_content) print(i) I have also tried running this using the OAuth client_id and secret key from GitHub; however, when I check my rate limit from the command prompt, it still shows 60 requests per hour and I am unable to extract data because of this. It would be great if someone could help with this - not sure where I am going wrong with the code. Thanks a lot.
Q: How to authenticate the GitHub API request to increase the rate limit? I am trying to extract data from the GitHub API (using the Python requests library) and facing challenges with authenticating my API request. I have tried multiple ways, including HTTP Basic authentication, HTTP Digest authentication and also OAuth authentication. Below is the code I used. DigestAuthentication: for i in range(0,2): api_url = re.sub("useri", ul[i], api_url_gen) res = requests.get(api_url, auth=HTTPDigestAuth('user', 'password')) json_content= json.loads(res.text) user_stats.append(json_content) print(i) Basic Authentication: for i in range(0,2): api_url = re.sub("useri", ul[i], api_url_gen) res = requests.get(api_url, auth=HTTPBasicAuth('user', 'password')) json_content= json.loads(res.text) user_stats.append(json_content) print(i) I have also tried running this using the OAuth client_id and secret key from GitHub; however, when I check my rate limit from the command prompt, it still shows 60 requests per hour and I am unable to extract data because of this. It would be great if someone could help with this - not sure where I am going wrong with the code. Thanks a lot.
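One way to check whether the credentials are actually being accepted (a sketch; the token string below is a placeholder): the GitHub REST API accepts basic auth or an Authorization token header, but not digest auth, and the /rate_limit endpoint reports the limit for whatever identity the request was made with — authenticated requests should show a limit of 5000.

import requests

headers = {"Authorization": "token YOUR_PERSONAL_ACCESS_TOKEN"}
resp = requests.get("https://api.github.com/rate_limit", headers=headers)
print(resp.json()["rate"])  # e.g. {"limit": 5000, "remaining": ..., "reset": ...}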
stackoverflow
{ "language": "en", "length": 158, "provenance": "stackexchange_0000F.jsonl.gz:853122", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505712" }
d721ad65d02e2de9814c97f10b944b132bbb5d2a
Stackoverflow Stackexchange Q: How to resize (scale) a video using AVAssetExportSession? I've looked around and can't figure this out. I want to resize an AVAsset using AVAssetExportSession to a given width and height. I've looked into AVMutableVideoComposition.renderSize and using a scaling transform on AVMutableVideoCompositionLayerInstruction but neither of these produce expected results (stretching/cropping the final output in weird ways). For example: * *I have a source video that is 640x320 and I want to scale it down to 320x160. *I set the renderSize to CGSizeMake(320.0, 160.0) *I set the transform to CGAffineTransformMakeScale(0.5, 0.5) *I get the following output where the video isn't scaled to the appropriate size (looks too big), stretched, and not centered (cropped). Any idea what is going on?
Q: How to resize (scale) a video using AVAssetExportSession? I've looked around and can't figure this out. I want to resize an AVAsset using AVAssetExportSession to a given width and height. I've looked into AVMutableVideoComposition.renderSize and using a scaling transform on AVMutableVideoCompositionLayerInstruction but neither of these produce expected results (stretching/cropping the final output in weird ways). For example: * *I have a source video that is 640x320 and I want to scale it down to 320x160. *I set the renderSize to CGSizeMake(320.0, 160.0) *I set the transform to CGAffineTransformMakeScale(0.5, 0.5) *I get the following output where the video isn't scaled to the appropriate size (looks too big), stretched, and not centered (cropped). Any idea what is going on?
stackoverflow
{ "language": "en", "length": 117, "provenance": "stackexchange_0000F.jsonl.gz:853124", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505716" }
3f39781e448ed0d52c660f9c71f0e01cf08b3cc9
Stackoverflow Stackexchange Q: Pandas - adding columns, matching on index I'm trying to figure out if pandas, when adding two series together, automatically matches on index or if it simply adds by element position. If it's just by position, is there a way to get it to add on index? I've looked at merging, but I'm not very clear if the key in this case can be the index of both... For example, if I have two DFs: df1 = index value 0 10 1 12 2 15 4 20 df2 = index value 0 10 1 10 3 10 4 10 and I want to add df1[total] = df1[value] + df2[value] = df1 = index value 0 20 1 22 2 15 3 10 4 30 Thanks for your help in advance! A: Because of the intrinsic data alignment in pandas, you can use add with fill_value=0 and it will sum these two series based on index alignment. df1.add(df2,fill_value=0) Input: df1 = pd.Series([10]*4,index=[0,1,3,4]) df2 = pd.Series([10,12,15,20], index=[0,1,2,4]) df1.add(df2,fill_value=0) Output: 0 20.0 1 22.0 2 15.0 3 10.0 4 30.0 dtype: float64
Q: Pandas - adding columns, matching on index I'm trying to figure out if pandas, when adding two series together, automatically matches on index or if it simply adds by element position. If it's just by position, is there a way to get it to add on index? I've looked at merging, but I'm not very clear if the key in this case can be the index of both... For example, if I have two DFs: df1 = index value 0 10 1 12 2 15 4 20 df2 = index value 0 10 1 10 3 10 4 10 and I want to add df1[total] = df1[value] + df2[value] = df1 = index value 0 20 1 22 2 15 3 10 4 30 Thanks for your help in advance! A: Because of the intrinsic data alignment in pandas, you can use add with fill_value=0 and it will sum these two series based on index alignment. df1.add(df2,fill_value=0) Input: df1 = pd.Series([10]*4,index=[0,1,3,4]) df2 = pd.Series([10,12,15,20], index=[0,1,2,4]) df1.add(df2,fill_value=0) Output: 0 20.0 1 22.0 2 15.0 3 10.0 4 30.0 dtype: float64 A: Just do this: pd.concat([df1,df2], axis=1).sum(axis=1) pd.concat will merge the 2 (or more) frames and match based on index. sum(axis=1) just sums across the rows. Here's the working example: #create the example data df1 = pd.DataFrame({'index':[0,1,2,4],'value':[10,12,15,20]}).set_index('index') df2 = pd.DataFrame({'index':[0,1,3,4],'value':[10,10,10,10]}).set_index('index') The above will give you: In [7]: pd.concat([df1,df2],axis=1).sum(axis=1) Out[7]: index 0 20.0 1 22.0 2 15.0 3 10.0 4 30.0 dtype: float64
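To write the aligned sum back as the new column the question asks for, a sketch using the question's column names (note the plain assignment keeps only df1's index, so reindex first if rows present only in df2 should also appear):

# Index-aligned addition, then assignment back onto df1
df1['total'] = df1['value'].add(df2['value'], fill_value=0)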
stackoverflow
{ "language": "en", "length": 240, "provenance": "stackexchange_0000F.jsonl.gz:853130", "question_score": "9", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505738" }
f9ffb99fd57e63225293df189fc9f7f4dcd90c61
Stackoverflow Stackexchange Q: How to plot additional points on top of a scatter plot? I have a pandas dataframe df with two attributes df.one (=x) and df.two (=y). Now, I want to plot a scatter plot for these data points. I used ax1 = fig.add_subplot(111) ax1.scatter(df.one,df.two,c = 'g',marker = 'o',alpha = 0.2) Now, I want to plot the centroid of the data points, given by C. How should I overlay the centroid on the above scatter plot? I tried: ax1.scatter(C[:,0],C[:,1],c = 'r',marker = 'x') But it overrides the scatter plot, and I want to overlay it on that. Is there a 'hold on' option, similar to MATLAB? A: from matplotlib import pyplot as plt from statistics import * bill = [34.00, 108.00, 64.00, 88.00, 99.00, 51.00] tip = [ 5.00, 17.00, 11.00, 8.00, 14.00, 5.00] bill.sort() tip.sort() print(mean(bill)) print(mean(tip)) plt.scatter(bill, tip) plt.scatter([mean(bill)], [mean(tip)]) plt.show() I wanted to plot the mean of the data too, so I used this format and got this result: Result of data
Q: How to plot additional points on top of a scatter plot? I have a pandas dataframe df with two attributes df.one (=x) and df.two (=y). Now, I want to plot a scatter plot for these data points. I used ax1 = fig.add_subplot(111) ax1.scatter(df.one,df.two,c = 'g',marker = 'o',alpha = 0.2) Now, I want to plot the centroid of the data points, given by C. How should I overlay the centroid on the above scatter plot? I tried: ax1.scatter(C[:,0],C[:,1],c = 'r',marker = 'x') But it overrides the scatter plot, and I want to overlay it on that. Is there a 'hold on' option, similar to MATLAB? A: from matplotlib import pyplot as plt from statistics import * bill = [34.00, 108.00, 64.00, 88.00, 99.00, 51.00] tip = [ 5.00, 17.00, 11.00, 8.00, 14.00, 5.00] bill.sort() tip.sort() print(mean(bill)) print(mean(tip)) plt.scatter(bill, tip) plt.scatter([mean(bill)], [mean(tip)]) plt.show() I wanted to plot the mean of the data too, so I used this format and got this result: Result of data A: If you need points overlaid on the original plot, use ax.plot(x, y) ex. ax = plt.subplot(1, 1, 1) ax.scatter([1, 2, 3], [1, 2, 3]) ax.plot(1.5, 1.5, "or") If you pass a list to x and y, multiple points can be added to the plot. Also, in case you need to add some annotation beside the point, try ax.annotate("Some explanation", (x, y))
stackoverflow
{ "language": "en", "length": 221, "provenance": "stackexchange_0000F.jsonl.gz:853139", "question_score": "8", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505762" }
6807dd0bffdef258115638cc3fdbf480040bbbab
Stackoverflow Stackexchange Q: C++ mutex locking thread priority I'm looking to give priority to a thread so that if two threads are waiting for a mutex, the thread with the highest priority will always take the mutex before the lower priority one. A colleague suggested that by changing the thread priority of my thread I should achieve that. I tried using the SetThreadPriority() function to set one of the waiting threads to 0(normal) and the other one to 2 (highest) but it doesn't affect the mutex behavior like I was hoping. The lock currently always goes to the first thread that requested the ownership. So is this behavior normal? Contrary to what my colleague thought? Is there a different way to give thread priority that I might be missing? Or am I looking at a more complicated problem to solve? A: Thread priority says how much time the thread gets on the CPU as determined by the scheduler, which will preferentially schedule higher priority threads - it doesn't affect the behaviour of mutexes, and I'm not aware of any means of making it do so.
Q: C++ mutex locking thread priority I'm looking to give priority to a thread so that if two threads are waiting for a mutex, the thread with the highest priority will always take the mutex before the lower priority one. A colleague suggested that by changing the thread priority of my thread I should achieve that. I tried using the SetThreadPriority() function to set one of the waiting threads to 0(normal) and the other one to 2 (highest) but it doesn't affect the mutex behavior like I was hoping. The lock currently always goes to the first thread that requested the ownership. So is this behavior normal? Contrary to what my colleague thought? Is there a different way to give thread priority that I might be missing? Or am I looking at a more complicated problem to solve? A: Thread priority says how much time the thread gets on the CPU as determined by the scheduler, which will preferentially schedule higher priority threads - it doesn't affect the behaviour of mutexes, and I'm not aware of any means of making it do so. A: Normally mutexes are not biased. This can be implemented as an improvised mutex which is biased by priority and also manages starvation.
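For completeness, a hedged sketch of the kind of improvised priority-biased lock the second answer alludes to, built from std::mutex and std::condition_variable (illustrative only — it is not a drop-in replacement for std::mutex and does not address priority inversion):

#include <condition_variable>
#include <mutex>
#include <set>

class PriorityMutex {
public:
    void lock(int priority) {
        std::unique_lock<std::mutex> lk(m_);
        waiting_.insert(priority);
        // Proceed only when the lock is free and we are the highest-priority waiter.
        cv_.wait(lk, [&] { return !held_ && priority == *waiting_.rbegin(); });
        waiting_.erase(waiting_.find(priority));
        held_ = true;
    }

    void unlock() {
        {
            std::lock_guard<std::mutex> lk(m_);
            held_ = false;
        }
        cv_.notify_all();
    }

private:
    std::mutex m_;
    std::condition_variable cv_;
    std::multiset<int> waiting_;
    bool held_ = false;
};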
stackoverflow
{ "language": "en", "length": 206, "provenance": "stackexchange_0000F.jsonl.gz:853197", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44505992" }
8b1e41143a18d3e19cd63be0c9b41a4115b8e128
Stackoverflow Stackexchange Q: Are there reactive forms in AngularDart? The official AngularDart guide (https://webdev.dartlang.org/angular/guide/forms) shows how to build forms the template-driven way. I would like to use reactive forms (https://angular.io/docs/ts/latest/guide/reactive-forms.html) with AngularDart, but I can't even find a proper reference for the FormGroup class (see the screenshot below). A: This seems to have been added in AngularDart 5; see the NgFormControl class.
Q: Are there reactive forms in AngularDart? The official AngularDart guide (https://webdev.dartlang.org/angular/guide/forms) shows how to build forms the template-driven way. I would like to use reactive forms (https://angular.io/docs/ts/latest/guide/reactive-forms.html) with AngularDart, but I can't even find a proper reference for the FormGroup class (see the screenshot below). A: This seems to have been added in AngularDart 5; see the NgFormControl class.
stackoverflow
{ "language": "en", "length": 56, "provenance": "stackexchange_0000F.jsonl.gz:853272", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506214" }
40addaf0e931aaae605cacf3bd8fcc91cca53350
Stackoverflow Stackexchange Q: How do I change the debug port in Visual Studio 2017? How to change debug port in Visual Studio 2017? I checked the property pages but to no avail. A: Go into the .sln file and edit the port there. For example, if it's currently on port: 50722 Then just do a replace: 50722 replace with: 50723 and it should build just fine. There should be about 5 spots it'll replace.
Q: How do I change the debug port in Visual Studio 2017? How to change debug port in Visual Studio 2017? I checked the property pages but to no avail. A: Go into the .sln file and edit the port there. For example, if it's currently on port: 50722 Then just do a replace: 50722 replace with: 50723 and it should build just fine. There should be about 5 spots it'll replace. A: Project file properties In VS 2017, I was able to update it in the project properties, instead of the solution properties. * *Right-click on your project file, then choose Properties. *On the Web tab find the Project Url section. It should say something like https://localhost:44348/ *Simply modify it to your desired port number and save. A: For me, I was able to find it with the following steps in Visual Studio 2017: * *Right click on the web project, then click Properties. *Click on the Debug tab. *Under the Profile IIS Express, you will find the port at the App URL box.
stackoverflow
{ "language": "en", "length": 175, "provenance": "stackexchange_0000F.jsonl.gz:853274", "question_score": "21", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506216" }
955367151db20bb6e54220a3f3446f111d413687
Stackoverflow Stackexchange Q: Reserved keyword in Spark SQL Folks, I wrote a pyspark program which reads JSON files and converts them into a dataframe. The problem I am facing is when I try to cast and rename a column which happens to be a reserved keyword. The column parent.sub.GROUP (group being the reserved word) Example ==> option 1 tempvar2 ="`parent.sub.group`" + ".cast(string).alias(parent_sub_group)" ==> option 2 tempvar2="\"parent.sub.group\"" + ".cast(string).alias(parent_sub_group)" #pass the variable into select dataframe2 = dataframe1.select(tempvar2) It is strange that if I double-quote the column name it works fine in the pyspark shell. If it works in the shell it should work in my program as well, but I am finding it hard to figure out what I am missing. Any help / lead would be appreciated. I have tried the below as well: dataframe2= dataframe2.selectExpr("cast (parent.sub.group as string) communities_association_group" ) dataframe2 = dataframe2.selectExpr("cast (\"parent.sub.group\" as string) communities_association_group" ) dataframe2 = dataframe1.selectExpr("cast (\"`parent.sub.group`\" as string) communities_association_group" ) dataframe2 = dataframe2.selectExpr("cast (`parent.sub.group` as string) communities_association_group" ) Spark Version : 1.6
Q: Reserved keyword in Spark SQL Folks, I wrote a pyspark program which reads JSON files and converts them into a dataframe. The problem I am facing is when I try to cast and rename a column which happens to be a reserved keyword. The column parent.sub.GROUP (group being the reserved word) Example ==> option 1 tempvar2 ="`parent.sub.group`" + ".cast(string).alias(parent_sub_group)" ==> option 2 tempvar2="\"parent.sub.group\"" + ".cast(string).alias(parent_sub_group)" #pass the variable into select dataframe2 = dataframe1.select(tempvar2) It is strange that if I double-quote the column name it works fine in the pyspark shell. If it works in the shell it should work in my program as well, but I am finding it hard to figure out what I am missing. Any help / lead would be appreciated. I have tried the below as well: dataframe2= dataframe2.selectExpr("cast (parent.sub.group as string) communities_association_group" ) dataframe2 = dataframe2.selectExpr("cast (\"parent.sub.group\" as string) communities_association_group" ) dataframe2 = dataframe1.selectExpr("cast (\"`parent.sub.group`\" as string) communities_association_group" ) dataframe2 = dataframe2.selectExpr("cast (`parent.sub.group` as string) communities_association_group" ) Spark Version : 1.6
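A hedged sketch of the usual way to express this without pasting the cast into a string (it uses Spark 1.6's Python column API; whether backticks are needed depends on whether parent.sub.group is one literal column name containing dots or a genuinely nested struct field):

from pyspark.sql.functions import col

# For a literal dotted column name, keep the backticks; for a nested struct
# field, drop them and use plain "parent.sub.group".
dataframe2 = dataframe1.select(
    col("`parent.sub.group`").cast("string").alias("parent_sub_group")
)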
stackoverflow
{ "language": "en", "length": 166, "provenance": "stackexchange_0000F.jsonl.gz:853306", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506322" }
f5a5b8b35acbc06bf1394dfbae6ff7b3be4b665f
Stackoverflow Stackexchange Q: Laravel 5.4: Class 'App\Http\Controllers\Response' not found error I am following the Laracasts API tutorial and trying to create an ApiController that all the other controllers extend. ApiController is responsible for response handling. class ApiController extends Controller { protected $statusCode; public function getStatusCode() { return $this->statusCode; } public function setStatusCode($statusCode) { $this->statusCode = $statusCode; } public function respondNotFound($message = 'Not Found!') { return Reponse::json([ 'error' => [ 'message' => $message, 'status_code' => $this->getStatusCode() ] ]); } } And I also have a ReportController that extends ApiController. class ReportController extends ApiController { /** * Display the specified resource. * * @param int $id * @return \Illuminate\Http\Response */ public function show($id) { $report = Report::find($id); if (! $report ) { $this->respondNotFound('Report does not exist.'); } return Response::json([ 'data'=> $this->ReportTransformer->transform($report) ], 200); } } When I try to call the respondNotFound method from ReportController I get the Class 'App\Http\Controllers\Response' not found error; even though I add use Illuminate\Support\Facades\Response; to the parent or child class, I still get the error. How can I fix this? Any help would be appreciated. A: Since it's a facade, add this: use Response; Or use full namespace: return \Response::json(...); Or just use helper: return response()->json(...);
Q: Laravel 5.4: Class 'App\Http\Controllers\Response' not found error I am following the Laracasts API tutorial and trying to create an ApiController that all the other controllers extend. ApiController is responsible for response handling. class ApiController extends Controller { protected $statusCode; public function getStatusCode() { return $this->statusCode; } public function setStatusCode($statusCode) { $this->statusCode = $statusCode; } public function respondNotFound($message = 'Not Found!') { return Reponse::json([ 'error' => [ 'message' => $message, 'status_code' => $this->getStatusCode() ] ]); } } And I also have a ReportController that extends ApiController. class ReportController extends ApiController { /** * Display the specified resource. * * @param int $id * @return \Illuminate\Http\Response */ public function show($id) { $report = Report::find($id); if (! $report ) { $this->respondNotFound('Report does not exist.'); } return Response::json([ 'data'=> $this->ReportTransformer->transform($report) ], 200); } } When I try to call the respondNotFound method from ReportController I get the Class 'App\Http\Controllers\Response' not found error; even though I add use Illuminate\Support\Facades\Response; to the parent or child class, I still get the error. How can I fix this? Any help would be appreciated. A: Since it's a facade, add this: use Response; Or use full namespace: return \Response::json(...); Or just use helper: return response()->json(...);
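A sketch of how the accepted suggestions could look inside the ApiController from the question; note that the question's respondNotFound() also spells the facade as Reponse, which would still fail once the import problem is solved.

use Illuminate\Support\Facades\Response;   // or simply: use Response;

class ApiController extends Controller
{
    // ...
    public function respondNotFound($message = 'Not Found!')
    {
        // the response() helper needs no import at all
        return response()->json([
            'error' => [
                'message' => $message,
                'status_code' => $this->getStatusCode(),
            ],
        ]);
    }
}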
stackoverflow
{ "language": "en", "length": 192, "provenance": "stackexchange_0000F.jsonl.gz:853320", "question_score": "12", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506361" }
8f918fd3ae8ae903d283bda4eeb1621cb7f827c9
Stackoverflow Stackexchange Q: Grunt not found error when deploying to Heroku I'm getting the error below: remote: sh: 1: grunt: not found when I'm deploying my code to Heroku server. My install scripts in package.json is like: "scripts": { "postinstall": "grunt heroku:production && bower install", "test": "mocha", "start": "node app.js" } I set heroku config like heroku config:set NPM_CONFIG_PRODUCTION=false Also I made export BUILDPACK_URL=https://github.com/mbuchetics/heroku-buildpack-nodejs-grunt.git but nothing has changed. I have grunt and grunt-cli in both dependencies and devDependencies. So where am I wrong?
Q: Grunt not found error when deploying to Heroku I'm getting the error below: remote: sh: 1: grunt: not found when I'm deploying my code to Heroku server. My install scripts in package.json is like: "scripts": { "postinstall": "grunt heroku:production && bower install", "test": "mocha", "start": "node app.js" } I set heroku config like heroku config:set NPM_CONFIG_PRODUCTION=false Also I made export BUILDPACK_URL=https://github.com/mbuchetics/heroku-buildpack-nodejs-grunt.git but nothing has changed. I have grunt and grunt-cli in both dependencies and devDependencies. So where am I wrong?
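One hedged way to restructure package.json for this (version numbers are placeholders, not from the question): the Heroku Node.js buildpack runs a heroku-postbuild script after installing dependencies, and keeping grunt, grunt-cli and bower in dependencies means they survive even when devDependencies are pruned. If grunt still is not found, invoking it as ./node_modules/.bin/grunt heroku:production sidesteps PATH issues entirely.

{
  "scripts": {
    "heroku-postbuild": "grunt heroku:production && bower install",
    "test": "mocha",
    "start": "node app.js"
  },
  "dependencies": {
    "grunt": "^1.0.1",
    "grunt-cli": "^1.2.0",
    "bower": "^1.8.0"
  }
}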
stackoverflow
{ "language": "en", "length": 81, "provenance": "stackexchange_0000F.jsonl.gz:853345", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506443" }
b727bb556f152c8935bba2681bdff7e9612b5701
Stackoverflow Stackexchange Q: Using sips to create an icns file from a png file I can make a jpeg from a png: sips -s format jpeg myIcon.png --out myIcon.jpeg But the same command won't work on icns: sips -s format icns myIcon.png --out myIcon.icns # Error: Unable to write image to file ...myIcon.icns How can I get around this error? A: sips can only convert images into .icns if the initial image file is 256x256, 512x512, and as far as I've been able to verify, 1024x1024. You would have to first resize your image to one of these sizes, and then use the format option to convert the image to a .icns. The code below does this. First, resize the image to one of the resolutions above: sips -z 256 256 myIcon.png --out myIconResized.png OR sips -z 512 512 myIcon.png --out myIconResized.png OR sips -z 1024 1024 myIcon.png --out myIconResized.png Second, convert the resized image file to a .icns file. sips -s format icns myIconResized.png --out myIcon.icns Now you have a .icns file from the resized image file.
Q: Using sips to create an icns file from a png file I can make a jpeg from a png: sips -s format jpeg myIcon.png --out myIcon.jpeg But the same command won't work on icns: sips -s format icns myIcon.png --out myIcon.icns # Error: Unable to write image to file ...myIcon.icns How can I get around this error? A: sips can only convert images into .icns if the initial image file is 256x256, 512x512, and as far as I've been able to verify, 1024x1024. You would have to first resize your image to one of these sizes, and then use the format option to convert the image to a .icns. The code below does this. First, resize the image to one of the resolutions above: sips -z 256 256 myIcon.png --out myIconResized.png OR sips -z 512 512 myIcon.png --out myIconResized.png OR sips -z 1024 1024 myIcon.png --out myIconResized.png Second, convert the resized image file to a .icns file. sips -s format icns myIconResized.png --out myIcon.icns Now you have a .icns file from the resized image file.
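The same two steps, wrapped into a small shell sketch; file names follow the answer above, and it assumes the source PNG is square so the resize does not distort it.

#!/bin/sh
SRC=myIcon.png
# resize to one of the supported sizes, then convert the resized copy
sips -z 512 512 "$SRC" --out myIconResized.png
sips -s format icns myIconResized.png --out myIcon.icns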
stackoverflow
{ "language": "en", "length": 176, "provenance": "stackexchange_0000F.jsonl.gz:853425", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506713" }
f625fa2fc4fe21d1be2d805f34d86085b86c572c
Stackoverflow Stackexchange Q: Kotlin compiler complains about using a SPeL expression in a property definition. WHY? When I try to use a SPeL expression to inject a value it works in Java but NOT in Kotlin. The compiler says Error:(13, 25) Kotlin: An annotation parameter must be a compile-time constant Code: @SpringBootApplication open class DeDup(@Value("#{new java.io.File('${roots}')}") val roots: Set<File>, @Value("algo") val hashAlgo: String, @Value("types")val fileTypes: List<String>) { } fun main(args: Array<String>) { SpringApplication.run(DeDup::class.java, *args) } Mmm... news flash Kotlin compiler: It IS a constant! The compiler clearly knows it's a SPeL expression and doesn't like it. My questions: * *Why doesn't Kotlin like SPeL? This is constructor injection (or is it?) and doesn't violate immutability. *Is this a compiler bug? The message is irrefutably wrong. A: ${roots} inside a String in Kotlin is a string template, therefore that String is not a constant. If you want the String to contain those actual characters and not be interpreted as a template, you'll have to escape the $: @Value("#{new java.io.File('\${roots}')}")
Q: Kotlin compiler complains about using a SPeL expression in a property definition. WHY? When I try to use a SPeL expression to inject a value it works in Java but NOT in Kotlin. The compiler says Error:(13, 25) Kotlin: An annotation parameter must be a compile-time constant Code: @SpringBootApplication open class DeDup(@Value("#{new java.io.File('${roots}')}") val roots: Set<File>, @Value("algo") val hashAlgo: String, @Value("types")val fileTypes: List<String>) { } fun main(args: Array<String>) { SpringApplication.run(DeDup::class.java, *args) } Mmm... news flash Kotlin compiler: It IS a constant! The compiler clearly knows it's a SPeL expression and doesn't like it. My questions: * *Why doesn't Kotlin like SPeL? This is constructor injection (or is it?) and doesn't violate immutability. *Is this a compiler bug? The message is irrefutably wrong. A: ${roots} inside a String in Kotlin is a string template, therefore that String is not a constant. If you want the String to contain those actual characters and not be interpreted as a template, you'll have to escape the $: @Value("#{new java.io.File('\${roots}')}")
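Applied to the constructor from the question, the only change is the escaped dollar sign; everything else is the original snippet.

@SpringBootApplication
open class DeDup(
    @Value("#{new java.io.File('\${roots}')}") val roots: Set<File>,
    // equivalent spelling using an interpolated literal dollar:
    // @Value("#{new java.io.File('${'$'}{roots}')}")
    @Value("algo") val hashAlgo: String,
    @Value("types") val fileTypes: List<String>
)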
stackoverflow
{ "language": "en", "length": 168, "provenance": "stackexchange_0000F.jsonl.gz:853434", "question_score": "9", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506762" }
2dd0c15612e0f2bc538e10c0469f4bcd368e64c1
Stackoverflow Stackexchange Q: Setting emoji in string api 22 Hello, I display a String from resources with an emoji coded in this way: \uD83D\uDE09. On api 25 it works perfectly, but on api 22 I got this error JNI DETECTED ERROR IN APPLICATION: input is not valid Modified UTF-8: illegal start byte 0xf0 When I delete the emoji it works perfectly. How should I encode the emoji? I don't want to change the min api
Q: Setting emoji in string api 22 Hello, I display a String from resources with an emoji coded in this way: \uD83D\uDE09. On api 25 it works perfectly, but on api 22 I got this error JNI DETECTED ERROR IN APPLICATION: input is not valid Modified UTF-8: illegal start byte 0xf0 When I delete the emoji it works perfectly. How should I encode the emoji? I don't want to change the min api
stackoverflow
{ "language": "en", "length": 68, "provenance": "stackexchange_0000F.jsonl.gz:853440", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506780" }
4b663ef01c987d0f2ebe05943fc11a50fd9e4c3f
Stackoverflow Stackexchange Q: Clone or copy a method in an object (Javascript) Suppose I have an object A: var A = { 'parameter': "Dura lex sed lex.", 'function_a': function (new_type) { console.log ("It's working!"); } }; Then, suppose I also have an object B: var B = { 'parameter': "Veni vidi vici!" }; What I need is a simple way to dynamically create a method function_b() inside object B without copying/cloning the parameter of object A ("Dura lex sed lex.") into object B, and to preserve the parameter ("Veni vidi vici!") of object B. How can I do it? A: try it: B['function_b'] = A['function_a'];
Q: Clone or copy a method in an object (Javascript) Suppose I have an object A: var A = { 'parameter': "Dura lex sed lex.", 'function_a': function (new_type) { console.log ("It's working!"); } }; Then, suppose I also have an object B: var B = { 'parameter': "Veni vidi vici!" }; What I need is a simple way to dynamically create a method function_b() inside object B without copying/cloning the parameter of object A ("Dura lex sed lex.") into object B, and to preserve the parameter ("Veni vidi vici!") of object B. How can I do it? A: try it: B['function_b'] = A['function_a']; A: You mean something like this? var A = { 'parameter': "Dura lex sed lex.", 'function_a': function (new_type) { console.log ("It's working!"); } }; var B = { 'parameter': "Vini vidi vici!" }; var clone = function(origin, target, prefix) { Object.keys(origin).forEach(function(key) { if (!target.hasOwnProperty(key)) { if (key.indexOf("function_") > -1) { target["function_" + prefix] = origin[key]; } } }); } clone(A, B, "b"); console.log(B); B.function_b(); A: I don't know if I understood your question, but I think you want something like this: B.function_b = function(whatever) { console.log('it works!'); };
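Spelled out, the simple assignment keeps B's own data intact because only a reference to the function is copied:

B.function_b = A.function_a;   // copy the method reference only
B.function_b();                // -> "It's working!"
console.log(B.parameter);      // -> "Veni vidi vici!" (untouched)
console.log(A.parameter);      // -> "Dura lex sed lex." (also untouched)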
stackoverflow
{ "language": "en", "length": 194, "provenance": "stackexchange_0000F.jsonl.gz:853445", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506795" }
df7904cbb2556c04431f268ff045a73cd46efe16
Stackoverflow Stackexchange Q: Laravel: Database [default] not configured when using an Eloquent model, but works on Facade I have a model that extends Laravel's Eloquent model. When I attempt to use it, I get the error Database [default] not configured. However, when I simply call the DB facade, it works just fine. Code: use Illuminate\Database\Eloquent\Model; class Owners extends Model { public $timestamps = false; protected $guarded = ['id']; protected $connection = 'default'; public function records() { return $this->belongsTo(OwnersToRecords::class, 'owner_id', 'id'); } public function zones() { return $this->belongsTo(OwnersToZones::class, 'record_id', 'id'); } } When invoking by using $owner = new Owners, I get the error. When invoking by DB::table('owners'), the table works fine. So, what's wrong? A: So, what's wrong? This line is wrong, because there is no db connection with this name in your config/database.php file. protected $connection = 'default'; Either remove it completely, in which case a default connection would be used, or specify a correct database connection name from your config file. Note: Unless you have a very specific requirements i.e. working with multiple db's or absolutely need a separate independent connection for a model etc. you don't want to explicitly specify connection name in your model.
Q: Laravel: Database [default] not configured when using an Eloquent model, but works on Facade I have a model that extends Laravel's Eloquent model. When I attempt to use it, I get the error Database [default] not configured. However, when I simply call the DB facade, it works just fine. Code: use Illuminate\Database\Eloquent\Model; class Owners extends Model { public $timestamps = false; protected $guarded = ['id']; protected $connection = 'default'; public function records() { return $this->belongsTo(OwnersToRecords::class, 'owner_id', 'id'); } public function zones() { return $this->belongsTo(OwnersToZones::class, 'record_id', 'id'); } } When invoking by using $owner = new Owners, I get the error. When invoking by DB::table('owners'), the table works fine. So, what's wrong? A: So, what's wrong? This line is wrong, because there is no db connection with this name in your config/database.php file. protected $connection = 'default'; Either remove it completely, in which case a default connection would be used, or specify a correct database connection name from your config file. Note: Unless you have a very specific requirements i.e. working with multiple db's or absolutely need a separate independent connection for a model etc. you don't want to explicitly specify connection name in your model.
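For illustration (the connection names here are the stock Laravel ones, not taken from the question's config): the value of $connection has to match a key under 'connections' in config/database.php, otherwise Laravel throws exactly this "Database [...] not configured" error.

// config/database.php
'default' => env('DB_CONNECTION', 'mysql'),
'connections' => [
    'mysql' => [ /* ... */ ],
],

// app/Owners.php -- either omit the property or name a real connection
class Owners extends Model
{
    protected $connection = 'mysql';
}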
stackoverflow
{ "language": "en", "length": 196, "provenance": "stackexchange_0000F.jsonl.gz:853452", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506820" }
13a90e42a48b5a4c63d1c918aa9db8244276c2cd
Stackoverflow Stackexchange Q: Share code between unit test and instrumentation tests when using kotlin Similar question: Sharing code between Android Instrumentation Tests and Unit Tests in Android Studio My setup is the following: * *src/test folder that contains unit tests. These can be either Java or Kotlin classes *src/androidTest that contains instrumentation tests. These can also be either Java or Kotlin classes *src/sharedTest is a folder that contains a bunch of utils that are shared between unit and instrumentation tests. This sharing is defined in gradle as: sourceSets { test.java.srcDirs += 'src/sharedTest/java' androidTest.java.srcDirs += 'src/sharedTest/java' } This allows any Java class in src/test or src/androidTest to access the utils. but not the Kotlin unit tests. My assumption is that they are not added to the sourceSets. My question is: how can I add them? I tried: sourceSets { test.kotlin.srcDirs += 'src/sharedTest/java' } But that doesn't seem to work. A: The default setup would be making the Kotlin source sets visible to the Java compiler and the IDE as well: android { sourceSets { main.java.srcDirs += 'src/main/kotlin' test.java.srcDirs += 'src/test/kotlin' test.java.srcDirs += 'src/sharedTest/java' androidTest.java.srcDirs += 'src/sharedTest/java' } } You don't need to configure the Kotlin source sets by itself.
Q: Share code between unit test and instrumentation tests when using kotlin Similar question: Sharing code between Android Instrumentation Tests and Unit Tests in Android Studio My setup is the following: * *src/test folder that contains unit tests. These can be either Java or Kotlin classes *src/androidTest that contains instrumentation tests. These can also be either Java or Kotlin classes *src/sharedTest is a folder that contains a bunch of utils that are shared between unit and instrumentation tests. This sharing is defined in gradle as: sourceSets { test.java.srcDirs += 'src/sharedTest/java' androidTest.java.srcDirs += 'src/sharedTest/java' } This allows any Java class in src/test or src/androidTest to access the utils. but not the Kotlin unit tests. My assumption is that they are not added to the sourceSets. My question is: how can I add them? I tried: sourceSets { test.kotlin.srcDirs += 'src/sharedTest/java' } But that doesn't seem to work. A: The default setup would be making the Kotlin source sets visible to the Java compiler and the IDE as well: android { sourceSets { main.java.srcDirs += 'src/main/kotlin' test.java.srcDirs += 'src/test/kotlin' test.java.srcDirs += 'src/sharedTest/java' androidTest.java.srcDirs += 'src/sharedTest/java' } } You don't need to configure the Kotlin source sets by itself. A: If your project has both java and kotlin code the key is to have: src/{folderName}/java and src/{folderName}/kotlin Where {folderName} is: test, androidTest, sharedTest or whatever. I use: android { sourceSets { androidTest.java.srcDirs += "src/androidTest/kotlin" androidTest.java.srcDirs += "src/sharedTest/java" androidTest.java.srcDirs += "src/sharedTest/kotlin" test.java.srcDirs += "src/test/kotlin" test.java.srcDirs += "src/sharedTest/java" test.java.srcDirs += "src/sharedTest/kotlin" } } This is some inconsistency as you can have all java and kotlin code under the same: main/java directory.
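If the build script happens to use the Gradle Kotlin DSL rather than Groovy, a rough equivalent of the snippet above would be the following (srcDirs(...) adds directories to the set rather than replacing it):

android {
    sourceSets {
        getByName("test") {
            java.srcDirs("src/test/kotlin", "src/sharedTest/java", "src/sharedTest/kotlin")
        }
        getByName("androidTest") {
            java.srcDirs("src/androidTest/kotlin", "src/sharedTest/java", "src/sharedTest/kotlin")
        }
    }
}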
stackoverflow
{ "language": "en", "length": 266, "provenance": "stackexchange_0000F.jsonl.gz:853461", "question_score": "5", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506838" }
c6e4e1c104706817e54e6506bee0a9bb6d98e708
Stackoverflow Stackexchange Q: Testing the availability of std::byte I'd like to use C++17's std::byte type if it's available, and fall back to using unsigned char if not, i.e. something along the lines of #include <cstddef> namespace my { #if SOMETHING using byte = std::byte; #else using byte = unsigned char; #endif } Unfortunately it seems that std::byte didn't come with the usual feature test macro, so it's not obvious what SOMETHING above should be. (AFAIK the value of __cplusplus for '17 hasn't been set yet, so I can't test for that either.) So, does anybody know of a way to detect whether std::byte is available on the big three compilers? A: If you want to test availability of std::byte introduced by C++17, you should use the __cpp_lib_byte macro (full list of feature testing macros is here). The sample usage: #include <iostream> #include <cstddef> #if __cpp_lib_byte using byte = std::byte; #else using byte = unsigned char; #endif int main() { return 0; }
Q: Testing the availability of std::byte I'd like to use C++17's std::byte type if it's available, and fall back to using unsigned char if not, i.e. something along the lines of #include <cstddef> namespace my { #if SOMETHING using byte = std::byte; #else using byte = unsigned char; #endif } Unfortunately it seems that std::byte didn't come with the usual feature test macro, so it's not obvious what SOMETHING above should be. (AFAIK the value of __cplusplus for '17 hasn't been set yet, so I can't test for that either.) So, does anybody know of a way to detect whether std::byte is available on the big three compilers? A: If you want to test availability of std::byte introduced by C++17, you should use the __cpp_lib_byte macro (full list of feature testing macros is here). The sample usage: #include <iostream> #include <cstddef> #if __cpp_lib_byte using byte = std::byte; #else using byte = unsigned char; #endif int main() { return 0; } A: For C++17, the value of __cplusplus is 201703L. But that's not super useful. Instead, you should look at SD-6. This document contains all the feature test macros for all features adopted into C++, and the vendors all agree to follow them. It's possible that an implementation provides a feature but not the macro (e.g. gcc 7.2 supports std::byte but does not provide the macro) and more obviously that an implementation provides a feature and a macro but the feature is buggy, but you should rely on the macro. That's what it's there for. Note that using unsigned char gives you almost the exact same behavior as std::byte - the only difference being that unsigned char supports extra arithmetic operations. So having an inexact check for std::byte support is okay - as long as you test on some compiler that supports std::byte to verify that you're not doing anything untoward, you're fine.
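Plugging the macro into the question's placeholder gives something like the following (the check compares against 201603, the value the standard assigns to __cpp_lib_byte for the C++17 library feature):

#include <cstddef>

namespace my {
#if defined(__cpp_lib_byte) && __cpp_lib_byte >= 201603
    using byte = std::byte;
#else
    using byte = unsigned char;
#endif
}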
stackoverflow
{ "language": "en", "length": 312, "provenance": "stackexchange_0000F.jsonl.gz:853464", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506845" }
a66f19f881656ced84dade6ad3df2122c2ff509b
Stackoverflow Stackexchange Q: Can't find @EnableSwagger annotation I just wanted to document my Spring REST application. Following the how-to with springfox, I added this as a dependency in my pom.xml: <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>2.7.0</version> </dependency> The jar is downloaded correctly but I can't find the @EnableSwagger2 annotation anywhere. A: Same here. The springfox.documentation.swagger2 classes don't exist in springfox-core:2.7.0. They exist in the previous release, 2.6.1, which I was working with before. I'm falling back to 2.6.1. It's probably not a bad idea to check their 2.7.0 release notes to see if there's any bug fix that you need though: https://github.com/springfox/springfox/releases/tag/2.7.0 UPDATE: @EnableSwagger is actually in springfox-swagger2-2.7.0.jar not springfox-core.jar.
Q: Can't find @EnableSwagger annotation I just wanted to document my Spring REST application. Following the how-to with springfox, I added this as a dependency in my pom.xml: <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>2.7.0</version> </dependency> The jar is downloaded correctly but I can't find the @EnableSwagger2 annotation anywhere. A: Same here. The springfox.documentation.swagger2 classes don't exist in springfox-core:2.7.0. They exist in the previous release, 2.6.1, which I was working with before. I'm falling back to 2.6.1. It's probably not a bad idea to check their 2.7.0 release notes to see if there's any bug fix that you need though: https://github.com/springfox/springfox/releases/tag/2.7.0 UPDATE: @EnableSwagger is actually in springfox-swagger2-2.7.0.jar not springfox-core.jar. A: It should be in the springfox.documentation.swagger2.annotations package. A: Try using @EnableSwagger2WebFlux and @EnableSwagger2WebMvc with the following dependency <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>3.0.0-SNAPSHOT</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>3.0.0-SNAPSHOT</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-spring-webflux</artifactId> <version>3.0.0-SNAPSHOT</version> </dependency> A: I used swagger 2.8.0 and @EnableSwagger2 could not be resolved. I deleted the local Maven repository, then re-imported it, and it was OK!
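Once the dependency resolves, a typical springfox 2.x configuration class looks roughly like this (class name, package selection and paths are placeholders, not taken from the question):

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import springfox.documentation.builders.PathSelectors;
import springfox.documentation.builders.RequestHandlerSelectors;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2;

@Configuration
@EnableSwagger2
public class SwaggerConfig {
    @Bean
    public Docket api() {
        // expose every handler and path; narrow the selectors as needed
        return new Docket(DocumentationType.SWAGGER_2)
                .select()
                .apis(RequestHandlerSelectors.any())
                .paths(PathSelectors.any())
                .build();
    }
}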
stackoverflow
{ "language": "en", "length": 159, "provenance": "stackexchange_0000F.jsonl.gz:853480", "question_score": "7", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506883" }
528b24ab269509cd795264629f76d0075f96093f
Stackoverflow Stackexchange Q: JAX-RS GROUP Validation not recognized by Swagger JAX-RS Version 1.5.12 I have a scenario where a property has a different validation applied depending on the end-point. This is done via "groups" (code example below & reference to the dropwizard doc: http://www.dropwizard.io/0.9.3/docs/manual/validation.html#validated). I implemented the DropWizard validations but Swagger does not recognize the "groups" and therefore doesn't provide a correct model description. In addition, Swagger doesn't seem to have any annotations that would accomplish the scenario. Can you please help? Thank you. public interface Version1FirstName { } public interface Version2FirstName { } class Person { @NotEmpty(groups = Version1FirstName .class) @Length(max = 5, groups = Version2FirstName .class) private firstName; private lastName; } @POST @Path("/v1") public void addName(@Validated(Version1FirstName.class) Person person) {} @POST @Path("/v2") public void retriveName(@Validated({Version2FirstName.class, Version1FirstName.class}) Person person) {}
Q: JAX-RS GROUP Validation not recognized by Swagger JAX-RS Version 1.5.12 I have a scenario where a property has a different validation applied depending on the end-point. This is done via "groups" (code example below & reference to the dropwizard doc: http://www.dropwizard.io/0.9.3/docs/manual/validation.html#validated). I implemented the DropWizard validations but Swagger does not recognize the "groups" and therefore doesn't provide a correct model description. In addition, Swagger doesn't seem to have any annotations that would accomplish the scenario. Can you please help? Thank you. public interface Version1FirstName { } public interface Version2FirstName { } class Person { @NotEmpty(groups = Version1FirstName .class) @Length(max = 5, groups = Version2FirstName .class) private firstName; private lastName; } @POST @Path("/v1") public void addName(@Validated(Version1FirstName.class) Person person) {} @POST @Path("/v2") public void retriveName(@Validated({Version2FirstName.class, Version1FirstName.class}) Person person) {}
stackoverflow
{ "language": "en", "length": 126, "provenance": "stackexchange_0000F.jsonl.gz:853511", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44506965" }
c73f6a2d035b5d81b3fd5bfb950d3461f8480bfa
Stackoverflow Stackexchange Q: Why was session_store.rb removed in Rails 5.1? Rails 5.1 removes the config/initializers/session_store.rb file. The upgrading guide doesn't mention it. Can someone explain why it was removed and what we should do with our existing file? My current file looks like the following: Rails.application.config.session_store( :cookie_store, :key => '_foo_session', # any value :secure => Rails.env.production? || Rails.env.staging?, # Only send cookie over SSL when in production and staging :http_only => true # Don't allow Javascript to access the cookie (mitigates cookie-based XSS exploits) )
Q: Why was session_store.rb removed in Rails 5.1? Rails 5.1 removes the config/initializers/session_store.rb file. The upgrading guide doesn't mention it. Can someone explain why it was removed and what we should do with our existing file? My current file looks like the following: Rails.application.config.session_store( :cookie_store, :key => '_foo_session', # any value :secure => Rails.env.production? || Rails.env.staging?, # Only send cookie over SSL when in production and staging :http_only => true # Don't allow Javascript to access the cookie (mitigates cookie-based XSS exploits) )
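Nothing prevents re-creating the initializer by hand in a 5.1 app; the generator simply stopped emitting it, and a cookie store keyed off the application name is applied by default. A sketch of the same file, keeping the options from the question:

# config/initializers/session_store.rb (created manually in a Rails 5.1 app)
Rails.application.config.session_store :cookie_store,
  key: '_foo_session',
  secure: Rails.env.production? || Rails.env.staging?,
  http_only: true  # note: Rack itself spells this option 'httponly'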
stackoverflow
{ "language": "en", "length": 83, "provenance": "stackexchange_0000F.jsonl.gz:853549", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507072" }
84da42582feafb9bb32de5e3b69206e74bf1470e
Stackoverflow Stackexchange Q: How to get Mac's terminal to use Brew's PHP version? I'm currently trying to get PHP 7.1 on my Mac. I followed this guide and successfully installed and linked php71 https://developerjack.com/blog/2016/08/26/Installing-PHP71-with-homebrew/ However, when running php -v from the terminal (after restarting the computer, after running source ~/.bash_profile etc.), I still get PHP 5.6.30 (cli). Is there a way to get the terminal to use brew's install instead of the version that comes with the Mac? A: Various ways: * *make a symlink from your php location to /usr/local/bin e.g. ln -s /usr/local/Cellar/php /usr/local/bin/php and make sure /usr/local/bin is before /usr/bin in your path variable. *make an alias in .bash_profile (restart your terminal) alias php=/yourpath
Q: How to get Mac's terminal to use Brew's PHP version? I'm currently trying to get PHP 7.1 on my Mac. I followed this guide and successfully installed and linked php71 https://developerjack.com/blog/2016/08/26/Installing-PHP71-with-homebrew/ However, when running php -v from the terminal (after restarting the computer, after running source ~/.bash_profile etc.), I still get PHP 5.6.30 (cli). Is there a way to get the terminal to use brew's install instead of the version that comes with the Mac? A: Various ways: * *make a symlink from your php location to /usr/local/bin e.g. ln -s /usr/local/Cellar/php /usr/local/bin/php and make sure /usr/local/bin is before /usr/bin in your path variable. *make an alias in .bash_profile (restart your terminal) alias php=/yourpath
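A third option along the same lines (the formula name and paths may differ depending on which tap and version was installed): prepend Homebrew's php bin directory to PATH in ~/.bash_profile so it wins over /usr/bin/php.

# ~/.bash_profile
export PATH="$(brew --prefix php71)/bin:$PATH"

# then reload the profile and check which binary is picked up
source ~/.bash_profile
which php
php -v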
stackoverflow
{ "language": "en", "length": 115, "provenance": "stackexchange_0000F.jsonl.gz:853574", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507136" }
190c7021ca18071c109b44c4df33c5d24c086eb2
Stackoverflow Stackexchange Q: how to check if current user is in admin group c# I have read the relevant Stack Overflow questions and tried out the following code: WindowsIdentity identity = WindowsIdentity.GetCurrent(); if (null != identity) { WindowsPrincipal principal = new WindowsPrincipal(identity); return principal.IsInRole(WindowsBuiltInRole.Administrator); } return false; It does not return true even though I have manually confirmed that the current user is a member of the local built-in Administrators group. What am I missing ? Thanks. A: Just found other way to check if user is admin, not running application as admin: private static bool IsAdmin() { WindowsIdentity identity = WindowsIdentity.GetCurrent(); if (identity != null) { WindowsPrincipal principal = new WindowsPrincipal(identity); List<Claim> list = new List<Claim>(principal.UserClaims); Claim c = list.Find(p => p.Value.Contains("S-1-5-32-544")); if (c != null) return true; } return false; } Credit to this answer, but code is corrected a bit.
Q: how to check if current user is in admin group c# I have read the relevant Stack Overflow questions and tried out the following code: WindowsIdentity identity = WindowsIdentity.GetCurrent(); if (null != identity) { WindowsPrincipal principal = new WindowsPrincipal(identity); return principal.IsInRole(WindowsBuiltInRole.Administrator); } return false; It does not return true even though I have manually confirmed that the current user is a member of the local built-in Administrators group. What am I missing ? Thanks. A: Just found other way to check if user is admin, not running application as admin: private static bool IsAdmin() { WindowsIdentity identity = WindowsIdentity.GetCurrent(); if (identity != null) { WindowsPrincipal principal = new WindowsPrincipal(identity); List<Claim> list = new List<Claim>(principal.UserClaims); Claim c = list.Find(p => p.Value.Contains("S-1-5-32-544")); if (c != null) return true; } return false; } Credit to this answer, but code is corrected a bit. A: The code you have above seemed to only work if running as an administrator, however you can query to see if the user belongs to the local administrators group (without running as an administrator) by doing something like the code below. Note, however, that the group name is hard-coded, so I guess you would have some localization work to do if you want to run it on operating systems of different languages. using (var pc = new PrincipalContext(ContextType.Domain, Environment.UserDomainName)) { using (var up = UserPrincipal.FindByIdentity(pc, WindowsIdentity.GetCurrent().Name)) { return up.GetAuthorizationGroups().Any(group => group.Name == "Administrators"); } } Note that you can also get a list of ALL the groups the user is a member of by doing this inside the second using block: var allGroups = up.GetAuthorizationGroups(); But this will be much slower depending on how many groups they're a member of. For example, I'm in 638 groups and it takes 15 seconds when I run it.
stackoverflow
{ "language": "en", "length": 297, "provenance": "stackexchange_0000F.jsonl.gz:853580", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507149" }
430961fb26b3a620a0e3829f94bead51509cbdc8
Stackoverflow Stackexchange Q: Meteor-React Error: Target Container is not a DOM element, after fix I copy+paste the code from : https://stackoverflow.com/questions/41514549/ Then, I fix error and change 'class' by 'id' so: main.html <head> <title>React Meteor Voting</title> </head> <body> <div id="render-target"></div> </body> main.jsx import React, { Component } from 'react'; import {Meteor} from 'meteor/meteor'; import { render } from 'react-dom'; Meteor.startup(() => { render(<App />, document.getElementById('render-target')); }); class App extends Component { render(){ return ( <h1>Hello!</h1> ); } } package.json { "name": "test-react", "private": true, "scripts": { "start": "meteor run" }, "dependencies": { "babel-runtime": "^6.20.0", "meteor-node-stubs": "~0.2.4", "react": "^15.5.4", "react-dom": "^15.5.4" } } But I got the same error: Uncaught Error: _registerComponent(...): Target container is not a DOM element. at invariant (modules.js?hash=de726ed…:12672) at Object._renderNewRootComponent (modules.js?hash=de726ed…:30752) at Object._renderSubtreeIntoContainer (modules.js?hash=de726ed…:30842) at render (modules.js?hash=de726ed…:30863) at app.js?hash=71ef103…:46 at maybeReady (meteor.js?hash=27829e9…:809) at HTMLDocument.loadingCompleted (meteor.js?hash=27829e9…:821) Is driving me crazy.... ¡¡¡¡¡ A: For me I just needed to import the .html file before I tried to render to the DOM. import './main.html';
Q: Meteor-React Error: Target Container is not a DOM element, after fix I copy+paste the code from : https://stackoverflow.com/questions/41514549/ Then, I fix error and change 'class' by 'id' so: main.html <head> <title>React Meteor Voting</title> </head> <body> <div id="render-target"></div> </body> main.jsx import React, { Component } from 'react'; import {Meteor} from 'meteor/meteor'; import { render } from 'react-dom'; Meteor.startup(() => { render(<App />, document.getElementById('render-target')); }); class App extends Component { render(){ return ( <h1>Hello!</h1> ); } } package.json { "name": "test-react", "private": true, "scripts": { "start": "meteor run" }, "dependencies": { "babel-runtime": "^6.20.0", "meteor-node-stubs": "~0.2.4", "react": "^15.5.4", "react-dom": "^15.5.4" } } But I got the same error: Uncaught Error: _registerComponent(...): Target container is not a DOM element. at invariant (modules.js?hash=de726ed…:12672) at Object._renderNewRootComponent (modules.js?hash=de726ed…:30752) at Object._renderSubtreeIntoContainer (modules.js?hash=de726ed…:30842) at render (modules.js?hash=de726ed…:30863) at app.js?hash=71ef103…:46 at maybeReady (meteor.js?hash=27829e9…:809) at HTMLDocument.loadingCompleted (meteor.js?hash=27829e9…:821) Is driving me crazy.... ¡¡¡¡¡ A: For me I just needed to import the .html file before I tried to render to the DOM. import './main.html'; A: meteor remove blaze-html-templates meteor add static-html A: Basically, the problem occurs due to HTML rendering. When you create meteor app it comes up with the blaze by default & you are working on the meteor with react or meteor with angular. You solve this error by two methods. Method 1 just add import statement in main.js import './main.html'; Method 2 Preferrable as it is my choice meteor remove blaze-html-templates meteor add static-html A: If you removed blaze-html-templates you need to add static-html package to compile your index.html and avoid this error (see Meteor Guide, end of paragraph): meteor add static-html A: I had the same problem. This is how I solved it. In your terminal type the following lines in the project directory. meteor remove blaze-html-templates meteor add static-html A: Add your script tag to before the closing </body> tag most likely the script loaded before your DOM ID. also switch this around... import React, { Component } from 'react'; import {Meteor} from 'meteor/meteor'; import { render } from 'react-dom'; class App extends Component { render(){ return ( <h1>Hello!</h1> ); } } Meteor.startup(() => { render(<App />, document.getElementById('render-target')); }); A: First remove blaze template dependency meteor remove blaze-html-templates Then add static html meteor add static-html A: I had removed the package (blaze-html-templates) . I assumed that being a Meteor+React would not be necessary but it is used to compile the main.html. Adding the package blaze-html-templates with meteor add blaze-html-templates solve the problem. A: In my case the fix was simple. In HTML change class to id. render( <h1>Hello</h1>, document.getElementById("app")) <body> <div class="app"><`enter code here`/div> </body> <body> <div id="app"><`enter code here`/div> </body> A: This is an old post but it's #1 in google results so... I was getting this error on my android build but not web, already removed blaze and added statichtml.. My issue ended up being that I had a favicon and manifest.json being imported in my html but I hadn't gotten around to creating them yet. 
I removed the links and it worked like a charm. Hopefully this can save someone a few hours of digging.
stackoverflow
{ "language": "en", "length": 511, "provenance": "stackexchange_0000F.jsonl.gz:853583", "question_score": "14", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507161" }
1bab808dc9e779a82aba9082acf32e4bc9dd1293
Stackoverflow Stackexchange Q: Can I submit code written in Swift 4 to the App Store with Xcode 8.3.2? I would like to use the Xcode 9 beta for use in development but I can't seem to get the correct settings for using Swift 3.2 within Xcode 9. The editor is much quicker than Xcode 8.3.2 I can (and have) downloaded the Swift 4 toolchain from swift.org and can use it from within both Xcode 8.3 and the Xcode 9 beta. If I wanted to ship my app before Xcode 9 is officially released, could I write it in Swift 4 and then submit it to the App Store via Xcode 8.3 using the Swift 4 toolchain? A: No, you cannot submit a binary that contains code built with the Swift 4 toolchain (or any development toolchain for that matter) to the Apple App Store using Xcode 8.3.2. Jordan Rose, a Swift compiler engineer at Apple, tweeted: Development toolchains are never used when submitting to the store, sorry! (We want to avoid XcodeGhost-like scenarios.) Thus, this configuration is not supported and you will need to wait for Xcode 9 to ship projects using Swift 4. Your binary will be rejected.
Q: Can I submit code written in Swift 4 to the App Store with Xcode 8.3.2? I would like to use the Xcode 9 beta for use in development but I can't seem to get the correct settings for using Swift 3.2 within Xcode 9. The editor is much quicker than Xcode 8.3.2 I can (and have) downloaded the Swift 4 toolchain from swift.org and can use it from within both Xcode 8.3 and the Xcode 9 beta. If I wanted to ship my app before Xcode 9 is officially released, could I write it in Swift 4 and then submit it to the App Store via Xcode 8.3 using the Swift 4 toolchain? A: No, you cannot submit a binary that contains code built with the Swift 4 toolchain (or any development toolchain for that matter) to the Apple App Store using Xcode 8.3.2. Jordan Rose, a Swift compiler engineer at Apple, tweeted: Development toolchains are never used when submitting to the store, sorry! (We want to avoid XcodeGhost-like scenarios.) Thus, this configuration is not supported and you will need to wait for Xcode 9 to ship projects using Swift 4. Your binary will be rejected.
stackoverflow
{ "language": "en", "length": 197, "provenance": "stackexchange_0000F.jsonl.gz:853586", "question_score": "4", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507171" }
a3b608a631e26de4a68e3bb0fc52b7604a6d1408
Stackoverflow Stackexchange Q: C# Interactive Console.Readkey() hangs When I type Console.Readkey() on the C# interactive window on Visual Studio 2015, it just hangs (typing any number of characters does not seem to do anything). Is this an incorrect setting or am I not understanding how the Interactive Window works with the Console? A: c# interactive currently doesn't support redirecting input so if you use any of the Console.Read Methods it will just freeze, you can fix this by clicking the reset button.
Q: C# Interactive Console.Readkey() hangs When I type Console.Readkey() on the C# interactive window on Visual Studio 2015, it just hangs (typing any number of characters does not seem to do anything). Is this an incorrect setting or am I not understanding how the Interactive Window works with the Console? A: c# interactive currently doesn't support redirecting input so if you use any of the Console.Read Methods it will just freeze, you can fix this by clicking the reset button.
stackoverflow
{ "language": "en", "length": 80, "provenance": "stackexchange_0000F.jsonl.gz:853589", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507179" }
4252e7ed2d9e4a977e4b9286a232eef29bb3eee7
Stackoverflow Stackexchange Q: Ionic 3 Google Maps Integration I am following this tutorial exactly: https://www.djamware.com/post/58f4da2080aca7414e78a638/step-by-step-tutorial-of-ionic-3-angular-4-and-google-maps-directions-service But I cannot get it to work. I have the API key set up no problem, but for some reason I keep getting the error Error: Uncaught (in promise): ReferenceError: google is not defined ReferenceError: google is not defined I am running the app using ionic lab For some reason it isn't working. Can someone help me find the problem? I have tried adding the cordova whitelist plugin, changing the https to http in the API key part, but still it isn't working. A: Did you declare the variable at home.ts? import { Component, ViewChild, ElementRef } from '@angular/core'; import { IonicPage } from 'ionic-angular'; import { NavController } from 'ionic-angular'; declare var google; @IonicPage() @Component({ selector: 'page-home', templateUrl: 'home.html' }) ...
Q: Ionic 3 Google Maps Integration I am following this tutorial exactly: https://www.djamware.com/post/58f4da2080aca7414e78a638/step-by-step-tutorial-of-ionic-3-angular-4-and-google-maps-directions-service But I cannot get it to work. I have the API key set up no problem, but for some reason I keep getting the error Error: Uncaught (in promise): ReferenceError: google is not defined ReferenceError: google is not defined I am running the app using ionic lab For some reason it isn't working. Can someone help me find the problem? I have tried adding the cordova whitelist plugin, changing the https to http in the API key part, but still it isn't working. A: Did you declare the variable at home.ts? import { Component, ViewChild, ElementRef } from '@angular/core'; import { IonicPage } from 'ionic-angular'; import { NavController } from 'ionic-angular'; declare var google; @IonicPage() @Component({ selector: 'page-home', templateUrl: 'home.html' }) ... A: Ionic 3 Google Map click doc link 1.install this package $ ionic cordova plugin add https://github.com/mapsplugin/cordova-plugin-googlemaps#multiple_maps --variable API_KEY_FOR_ANDROID="YOUR_ANDROID_API_KEY_IS_HERE" --variable API_KEY_FOR_IOS="YOUR_IOS_API_KEY_IS_HERE" $ npm install --save @ionic-native/google-maps 2.Android and ios Api create Go to this site - google cloud platform 3.After you got Api install packages 4.import app.module.ts import { GoogleMaps } from '@ionic-native/google-maps'; ...... provider:[ GoogleMaps ]; 5.home.html <ion-header> <ion-navbar> <ion-title> Ionic Blank </ion-title> </ion-navbar> </ion-header> <ion-content> <div id="map"></div> </ion-content> 6.home.ts import { Component } from '@angular/core'; import { NavController, Platform } from 'ionic-angular'; import { GoogleMaps, GoogleMap, GoogleMapsEvent, LatLng, CameraPosition, MarkerOptions, Marker } from '@ionic-native/google-maps'; @Component({ selector: 'page-home', templateUrl: 'home.html' }) export class HomePage { constructor(public navCtrl: NavController, private googleMaps: GoogleMaps, public platform: Platform) { platform.ready().then(()=>{ this.loadMap(); }) } loadMap() { // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('map'); let map: GoogleMap = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map or modifying it in anyway map.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); // Now you can add elements to the map like the marker } ); // create CameraPosition let position: CameraPosition = { target: { lat: 43.0741904, lng: -89.3809802 }, zoom: 18, tilt: 30 }; // move the map's camera to position map.moveCamera(position); // create new marker let markerOptions: MarkerOptions = { //position: ionic, title: 'Ionic' }; map.addMarker(markerOptions) .then((marker: Marker) => { marker.showInfoWindow(); }); } }
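For the JavaScript-SDK approach that tutorial uses, declare var google only silences the TypeScript compiler; the global is actually created by the Maps script tag in src/index.html, so the tag has to be present (and reachable from the device) before the map is built. A minimal sketch, with YOUR_API_KEY as a placeholder:

<!-- src/index.html, before the app's own build/main.js script -->
<script src="https://maps.googleapis.com/maps/api/js?key=YOUR_API_KEY"></script>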
stackoverflow
{ "language": "en", "length": 372, "provenance": "stackexchange_0000F.jsonl.gz:853611", "question_score": "3", "source": "stackexchange", "timestamp": "2023-03-29T00:00:00", "url": "https://stackoverflow.com/questions/44507264" }