content
stringlengths
86
88.9k
title
stringlengths
0
150
question
stringlengths
1
35.8k
answers
sequence
answers_scores
sequence
non_answers
sequence
non_answers_scores
sequence
tags
sequence
name
stringlengths
30
130
Q: Restrict jwt refresh token to only one endpoint I have implemented JWT token authorization & authentication from Spring resource server dependency. Here is the config file: @Configuration @RequiredArgsConstructor @EnableWebSecurity public class WebSecurityConfig { @Value("${app.chat.jwt.public.key}") private RSAPublicKey publicKey; @Value("${app.chat.jwt.private.key}") private RSAPrivateKey privateKey; @Bean public SecurityFilterChain filterChain(HttpSecurity http) throws Exception { http.cors().and().csrf().disable(); http.sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS); http.exceptionHandling( exceptions -> exceptions .authenticationEntryPoint(new BearerTokenAuthenticationEntryPoint()) .accessDeniedHandler(new BearerTokenAccessDeniedHandler())); http.authorizeHttpRequests() .requestMatchers("/auth/sign-in").permitAll() .requestMatchers("/auth/sign-up").permitAll() .anyRequest().authenticated() .and() .httpBasic(Customizer.withDefaults()) .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt); return http.build(); } @SneakyThrows @Bean public JwtEncoder jwtEncoder() { var jwk = new RSAKey.Builder(publicKey).privateKey(privateKey).build(); var jwks = new ImmutableJWKSet<>(new JWKSet(jwk)); return new NimbusJwtEncoder(jwks); } @SneakyThrows @Bean public JwtDecoder jwtDecoder() { return NimbusJwtDecoder.withPublicKey(publicKey).build(); } @Bean public JwtAuthenticationConverter jwtAuthenticationConverter() { var jwtGrantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter(); jwtGrantedAuthoritiesConverter.setAuthoritiesClaimName("roles"); jwtGrantedAuthoritiesConverter.setAuthorityPrefix("ROLE_"); var jwtAuthenticationConverter = new JwtAuthenticationConverter(); jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(jwtGrantedAuthoritiesConverter); return jwtAuthenticationConverter; } @Bean public CorsFilter corsFilter() { UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource(); CorsConfiguration 
config = new CorsConfiguration(); config.setAllowCredentials(true); config.addAllowedOrigin("*"); config.addAllowedHeader("*"); config.addAllowedMethod("*"); source.registerCorsConfiguration("/**", config); return new CorsFilter(source); } @Bean public PasswordEncoder passwordEncoder() { return new BCryptPasswordEncoder(); } @Bean public AuthenticationManager authenticationManager( AuthenticationConfiguration authenticationConfiguration) throws Exception { return authenticationConfiguration.getAuthenticationManager(); } } It works fine. I have AuthController where I have implemented endpoints for sign-in, sign-up, and refresh token. In each endpoint, I return a response with an access token and a refresh token. Here is the controller: @RestController @RequestMapping("/auth") @RequiredArgsConstructor public class AuthController { private final JwtTokenService tokenService; private final AuthenticationManager authManager; private final UserDetailsService usrDetailsService; private final UserService userService; record LoginRequest(String username, String password) {} @PostMapping("/sign-in") public TokensResponse login(@RequestBody LoginRequest request) { UsernamePasswordAuthenticationToken authenticationToken = new UsernamePasswordAuthenticationToken(request.username, request.password); authManager.authenticate(authenticationToken); var user = (User) usrDetailsService.loadUserByUsername(request.username); String accessToken = tokenService.generateAccessToken(user); String refreshToken = tokenService.generateRefreshToken(user); return new TokensResponse(accessToken, refreshToken); } record SignUpRequest(String username, String password){} @PostMapping("/sign-up") public TokensResponse signUp(@RequestBody SignUpRequest signUpRequest) { User registeredUser = userService.register(new AuthRequestDto(signUpRequest.username(), signUpRequest.password())); String accessToken = tokenService.generateAccessToken(registeredUser); String refreshToken = 
tokenService.generateRefreshToken(registeredUser); return new TokensResponse(accessToken, refreshToken); } @PreAuthorize("hasRole('REFRESH_TOKEN')") @GetMapping("/token/refresh") public TokensResponse refreshToken(HttpServletRequest request) { String headerAuth = request.getHeader("Authorization"); String previousRefreshToken = headerAuth.substring(7); String username = tokenService.parseToken(previousRefreshToken); var user = (User) usrDetailsService.loadUserByUsername(username); String accessToken = tokenService.generateAccessToken(user); String refreshToken = tokenService.generateRefreshToken(user); return new TokensResponse(accessToken, refreshToken); } record TokensResponse(String accessToken, String refreshToken) {} } And here is TokenService class where I generate those tokens: @Service @RequiredArgsConstructor public class JwtTokenServiceImpl implements JwtTokenService { private final JwtEncoder jwtEncoder; @Override public String generateAccessToken(User user) { Instant now = Instant.now(); String scope = user.getAuthorities().stream() .map(GrantedAuthority::getAuthority) .collect(Collectors.joining(" ")); JwtClaimsSet claims = JwtClaimsSet.builder() .issuer("self") .issuedAt(now) .expiresAt(now.plus(2, ChronoUnit.MINUTES)) .subject(user.getUsername()) .claim("scope", scope) .build(); return this.jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); } @Override public String generateRefreshToken(User user) { Instant now = Instant.now(); String scope = "ROLE_REFRESH_TOKEN"; JwtClaimsSet claims = JwtClaimsSet.builder() .issuer("self") .issuedAt(now) .expiresAt(now.plus(10, ChronoUnit.MINUTES)) .subject(user.getUsername()) .claim("scope", scope) .build(); return this.jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); } @Override public String parseToken(String token) { try { SignedJWT decodedJWT = SignedJWT.parse(token); return decodedJWT.getJWTClaimsSet().getSubject(); } catch (ParseException e) { e.printStackTrace(); } 
return null; } } What I want to do is to restrict the refresh token to be used only for the refresh endpoint. Because what's the point of having a short-term live access token if you can use a refresh token for all endpoints? I have tried to give the refresh token scope REFRESH_TOKEN and added @PreAuthorize("hasRole('REFRESH_TOKEN')") annotation for the refresh token endpoint. But it doesn't work (I can still send an access token to the refresh endpoint and get new tokens), because Spring doesn't look at the claims from the token. It just loads the user from the database by the username from the token and checks the user's roles from there. Please suggest how I can make the refresh token restricted to only one endpoint. Also it would be great to make it one-time use, but it seems that I would need to store tokens somewhere for that. A: the refresh token is bound to the client to which it was issued. source https://www.rfc-editor.org/rfc/rfc6749#section-6 A refresh token applies to the whole client scope (i.e. all endpoints). Therefore what you expect is not feasible.
Restrict jwt refresh token to only one endpoint
I have implemented JWT token authorization & authentication from Spring resource server dependency. Here is the config file: @Configuration @RequiredArgsConstructor @EnableWebSecurity public class WebSecurityConfig { @Value("${app.chat.jwt.public.key}") private RSAPublicKey publicKey; @Value("${app.chat.jwt.private.key}") private RSAPrivateKey privateKey; @Bean public SecurityFilterChain filterChain(HttpSecurity http) throws Exception { http.cors().and().csrf().disable(); http.sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS); http.exceptionHandling( exceptions -> exceptions .authenticationEntryPoint(new BearerTokenAuthenticationEntryPoint()) .accessDeniedHandler(new BearerTokenAccessDeniedHandler())); http.authorizeHttpRequests() .requestMatchers("/auth/sign-in").permitAll() .requestMatchers("/auth/sign-up").permitAll() .anyRequest().authenticated() .and() .httpBasic(Customizer.withDefaults()) .oauth2ResourceServer(OAuth2ResourceServerConfigurer::jwt); return http.build(); } @SneakyThrows @Bean public JwtEncoder jwtEncoder() { var jwk = new RSAKey.Builder(publicKey).privateKey(privateKey).build(); var jwks = new ImmutableJWKSet<>(new JWKSet(jwk)); return new NimbusJwtEncoder(jwks); } @SneakyThrows @Bean public JwtDecoder jwtDecoder() { return NimbusJwtDecoder.withPublicKey(publicKey).build(); } @Bean public JwtAuthenticationConverter jwtAuthenticationConverter() { var jwtGrantedAuthoritiesConverter = new JwtGrantedAuthoritiesConverter(); jwtGrantedAuthoritiesConverter.setAuthoritiesClaimName("roles"); jwtGrantedAuthoritiesConverter.setAuthorityPrefix("ROLE_"); var jwtAuthenticationConverter = new JwtAuthenticationConverter(); jwtAuthenticationConverter.setJwtGrantedAuthoritiesConverter(jwtGrantedAuthoritiesConverter); return jwtAuthenticationConverter; } @Bean public CorsFilter corsFilter() { UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource(); CorsConfiguration config = new CorsConfiguration(); 
config.setAllowCredentials(true); config.addAllowedOrigin("*"); config.addAllowedHeader("*"); config.addAllowedMethod("*"); source.registerCorsConfiguration("/**", config); return new CorsFilter(source); } @Bean public PasswordEncoder passwordEncoder() { return new BCryptPasswordEncoder(); } @Bean public AuthenticationManager authenticationManager( AuthenticationConfiguration authenticationConfiguration) throws Exception { return authenticationConfiguration.getAuthenticationManager(); } } It works fine. I have AuthController where I have implemented endpoints for sign-in, sign-up, and refresh token. In each endpoint, I return a response with an access token and a refresh token. Here is the controller: @RestController @RequestMapping("/auth") @RequiredArgsConstructor public class AuthController { private final JwtTokenService tokenService; private final AuthenticationManager authManager; private final UserDetailsService usrDetailsService; private final UserService userService; record LoginRequest(String username, String password) {} @PostMapping("/sign-in") public TokensResponse login(@RequestBody LoginRequest request) { UsernamePasswordAuthenticationToken authenticationToken = new UsernamePasswordAuthenticationToken(request.username, request.password); authManager.authenticate(authenticationToken); var user = (User) usrDetailsService.loadUserByUsername(request.username); String accessToken = tokenService.generateAccessToken(user); String refreshToken = tokenService.generateRefreshToken(user); return new TokensResponse(accessToken, refreshToken); } record SignUpRequest(String username, String password){} @PostMapping("/sign-up") public TokensResponse signUp(@RequestBody SignUpRequest signUpRequest) { User registeredUser = userService.register(new AuthRequestDto(signUpRequest.username(), signUpRequest.password())); String accessToken = tokenService.generateAccessToken(registeredUser); String refreshToken = tokenService.generateRefreshToken(registeredUser); return new 
TokensResponse(accessToken, refreshToken); } @PreAuthorize("hasRole('REFRESH_TOKEN')") @GetMapping("/token/refresh") public TokensResponse refreshToken(HttpServletRequest request) { String headerAuth = request.getHeader("Authorization"); String previousRefreshToken = headerAuth.substring(7); String username = tokenService.parseToken(previousRefreshToken); var user = (User) usrDetailsService.loadUserByUsername(username); String accessToken = tokenService.generateAccessToken(user); String refreshToken = tokenService.generateRefreshToken(user); return new TokensResponse(accessToken, refreshToken); } record TokensResponse(String accessToken, String refreshToken) {} } And here is TokenService class where I generate those tokens: @Service @RequiredArgsConstructor public class JwtTokenServiceImpl implements JwtTokenService { private final JwtEncoder jwtEncoder; @Override public String generateAccessToken(User user) { Instant now = Instant.now(); String scope = user.getAuthorities().stream() .map(GrantedAuthority::getAuthority) .collect(Collectors.joining(" ")); JwtClaimsSet claims = JwtClaimsSet.builder() .issuer("self") .issuedAt(now) .expiresAt(now.plus(2, ChronoUnit.MINUTES)) .subject(user.getUsername()) .claim("scope", scope) .build(); return this.jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); } @Override public String generateRefreshToken(User user) { Instant now = Instant.now(); String scope = "ROLE_REFRESH_TOKEN"; JwtClaimsSet claims = JwtClaimsSet.builder() .issuer("self") .issuedAt(now) .expiresAt(now.plus(10, ChronoUnit.MINUTES)) .subject(user.getUsername()) .claim("scope", scope) .build(); return this.jwtEncoder.encode(JwtEncoderParameters.from(claims)).getTokenValue(); } @Override public String parseToken(String token) { try { SignedJWT decodedJWT = SignedJWT.parse(token); return decodedJWT.getJWTClaimsSet().getSubject(); } catch (ParseException e) { e.printStackTrace(); } return null; } } What I want to do is to restrict the refresh 
token to be used only for the refresh endpoint. Because what's the point of having a short-term live access token if you can use a refresh token for all endpoints? I have tried to give the refresh token scope REFRESH_TOKEN and added @PreAuthorize("hasRole('REFRESH_TOKEN')") annotation for the refresh token endpoint. But it doesn't work (I can still send an access token to the refresh endpoint and get new tokens), because Spring doesn't look at the claims from the token. It just loads the user from the database by the username from the token and checks the user's roles from there. Please suggest how I can make the refresh token restricted to only one endpoint. Also it would be great to make it one-time use, but it seems that I would need to store tokens somewhere for that.
[ "the refresh token is bound to the\n client to which it was issued. \n\nsource https://www.rfc-editor.org/rfc/rfc6749#section-6\nRefresh token affect to client scope (it means all end-points). Therefore your expected is not feasibility.\n" ]
[ 0 ]
[]
[]
[ "java", "jwt", "spring_security" ]
stackoverflow_0074673915_java_jwt_spring_security.txt
Q: Pass NextJS query arg on submit react-hook-form I'm trying to pass a query arg I receive in NextJS (if it exists) as a hidden input in a react-hook-form form. The naive implementation: const GiveawayForm = () => { const { handleSubmit, register } = useForm(); const router = useRouter(); const referrer = router.query?.ref; return ( <form onSubmit={handleSubmit(onSubmitForm)}> <input id='referrer' name='referrer' type='hidden' value={referrer} {...register( 'referrer')} /> <button type='submit'>click me</button> </form> ) } fails because it's changing from uncontrolled to controlled. Simply changing value={referrer} to value={referrer ? referrer : 'foo'} fixes the error, but always ends up submitting 'foo', even though it correctly updates the 'value' field in the rendered html. I guess I have to do something with react-hook-form's resetField, but I'm not sure; and even if that's true, I can't get it to work. If I do: const GiveawayForm = () => { const { handleSubmit, register, resetField } = useForm(); const router = useRouter(); const referrer = router.query?.ref; useEffect(() => { resetField('referrer'); }, [referrer]) return ( <form onSubmit={handleSubmit(onSubmitForm)}> <input id='referrer' name='referrer' type='hidden' value={referrer ? referrer : ''} {...register( 'referrer')} /> <button type='submit'>click me</button> </form> ) } it ends up not submitting the 'referrer' field at all. What hook-fu do I need to use to be able to submit with the form a value I receive from the useRouter hook? A: To fix the issue you're encountering with the useForm hook, you can try removing the value attribute from your input element, and instead use the defaultValue attribute to set the initial value of the field. Then, you can use the reset method provided by useForm to reset the value of the field to the initial value whenever the referrer variable changes. 
Here is an example of how you could do this: const GiveawayForm = () => { const { handleSubmit, register, reset } = useForm(); const router = useRouter(); const referrer = router.query?.ref; useEffect(() => { reset({ referrer }); }, [referrer, reset]); return ( <form onSubmit={handleSubmit(onSubmitForm)}> <input id="referrer" name="referrer" type="hidden" defaultValue={referrer || ''} ref={register} /> <button type="submit">click me</button> </form> ); }; In this example, the input element uses the defaultValue attribute to set the initial value of the referrer field to the value of the referrer variable. Then, the useEffect hook is used to reset the form whenever the referrer variable changes, using the reset method provided by useForm. This sets the value of the referrer field to the new value of the referrer variable.
Pass NextJS query arg on submit react-hook-form
I'm trying to pass a query arg I receive in NextJS (if it exists) as a hidden input in a react-hook-form form. The naive implementation: const GiveawayForm = () => { const { handleSubmit, register } = useForm(); const router = useRouter(); const referrer = router.query?.ref; return ( <form onSubmit={handleSubmit(onSubmitForm)}> <input id='referrer' name='referrer' type='hidden' value={referrer} {...register( 'referrer')} /> <button type='submit'>click me</button> </form> ) } fails because it's changing from uncontrolled to controlled. Simply changing value={referrer} to value={referrer ? referrer : 'foo'} fixes the error, but always ends up submitting 'foo', even though it correctly updates the 'value' field in the rendered html. I guess I have to do something with react-hook-form's resetField, but I'm not sure; and even if that's true, I can't get it to work. If I do: const GiveawayForm = () => { const { handleSubmit, register, resetField } = useForm(); const router = useRouter(); const referrer = router.query?.ref; useEffect(() => { resetField('referrer'); }, [referrer]) return ( <form onSubmit={handleSubmit(onSubmitForm)}> <input id='referrer' name='referrer' type='hidden' value={referrer ? referrer : ''} {...register( 'referrer')} /> <button type='submit'>click me</button> </form> ) } it ends up not submitting the 'referrer' field at all. What hook-fu do I need to use to be able to submit with the form a value I receive from the useRouter hook?
[ "To fix the issue you're encountering with the useForm hook, you can try removing the value attribute from your input element, and instead use the defaultValue attribute to set the initial value of the field. Then, you can use the reset method provided by useForm to reset the value of the field to the initial value whenever the referrer variable changes.\nHere is an example of how you could do this:\nconst GiveawayForm = () => {\n const {\n handleSubmit,\n register,\n reset\n } = useForm();\n const router = useRouter();\n\n const referrer = router.query?.ref;\n\n useEffect(() => {\n reset({ referrer });\n }, [referrer, reset]);\n\n return (\n <form onSubmit={handleSubmit(onSubmitForm)}>\n <input\n id=\"referrer\"\n name=\"referrer\"\n type=\"hidden\"\n defaultValue={referrer || ''}\n ref={register}\n />\n <button type=\"submit\">click me</button>\n </form>\n );\n};\n\nIn this example, the input element uses the defaultValue attribute to set the initial value of the referrer field to the value of the referrer variable. Then, the useEffect hook is used to reset the form whenever the referrer variable changes, using the reset method provided by useForm. This sets the value of the referrer field to the new value of the referrer variable.\n" ]
[ 1 ]
[]
[]
[ "next.js", "react_hook_form", "react_hooks" ]
stackoverflow_0074674552_next.js_react_hook_form_react_hooks.txt
Q: pattern matching in Python with regex problem I am trying to learn pattern matching with regex, the course is through coursera and hasn't been updated since python 3 came out so the instructors code is not working correctly. Here's what I have so far: # example Wiki data wiki= """There are several Buddhist universities in the United States. Some of these have existed for decades and are accredited. Others are relatively new and are either in the process of being accredited or else have no formal accreditation. The list includes: • Dhammakaya Open University – located in Azusa, California, • Dharmakirti College – located in Tucson, Arizona • Dharma Realm Buddhist University – located in Ukiah, California • Ewam Buddhist Institute – located in Arlee, Montana • Naropa University - located in Boulder, Colorado • Institute of Buddhist Studies – located in Berkeley, California • Maitripa College – located in Portland, Oregon • Soka University of America – located in Aliso Viejo, California • University of the West – located in Rosemead, California • Won Institute of Graduate Studies – located in Glenside, Pennsylvania""" pattern=re.compile( r'(?P<title>.*)' # the university title r'(-\ located\ in\ )' #an indicator of the location r'(?P<city>\w*)' # city the university is in r'(,\ )' #seperator for the state r'(?P<state>\w.*)') #the state the city is in) for item in re.finditer(pattern, wiki, re.VERBOSE): print(item.groupdict()) Output: Traceback (most recent call last): File "/Users/r..., line 194, in <module> for item in re.finditer(pattern, wiki, re.VERBOSE): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/re/__init__.py", line 223, in finditer return _compile(pattern, flags).finditer(string) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/re/__init__.py", line 282, in _compile raise ValueError( ValueError: cannot process flags argument with a compiled 
pattern I only want a dictionary with the university name, the city and the state. If I run it without re.VERBOSE, only one school shows up and none of the rest are there. I am somewhat new to python and don't know what to do about these errors A: In fact, for current versions of Python, you do not need to add re.VERBOSE at all. If you do for item in re.finditer(pattern, wiki): print(item.groupdict()) the program will print {'title': '• Naropa University ', 'city': 'Boulder', 'state': 'Colorado '} using Python 3.10. By the way, the program only outputs one school because the other schools use a long hyphen – instead of a short one, -. Making all schools use the same, and changing your pattern accordingly, should give you the whole list. A: Thanks to JustLearning, my problem is solved. Here is the code I ended up using. I can't believe it was a long hyphen instead of a short one. And now I know I don't need to use the re.VERBOSE. Thank you again pattern =re.compile( r'(?P.)' r'(-\ located\ in\ )' r'(?P.)' r'(,\ )' r'(?P.*)') A: In your example data you are using 2 types of hyphens. En Dash Hyphen-Minus If you want to match both you can make use of a character class [–-] Apart from that, using .* repeats 0+ times any character (can match empty strings) and will first match until the end of the line and will allow backtracking to match the rest of the pattern. What you could do is make the pattern a bit more precise starting each group matching at least a word character. If you are only interested in the groups title, city and state you don't need the other 2 capture groups. Note that if you want to match a space that you don't have to escape it. ^\W*(?P<title>\w.*?) [–-] located in (?P<city>\w.*?), (?P<state>\w.*) ^ Start of string \W* Match optional non word characters (?P<title>\w.*?) Match a word character, followed by matching as few characters as possible [–-] Match any of the dashes with a space to the left and right located in Match literally (?P<city>\w.*?) 
Match a word character followed by matching as least as possible chars , Match literally (?P<state>\w.*) Match a word character followed by the rest of the line Regex demo | Python demo Example import re pattern = r"^\W*(?P<title>\w.*?) [–-] located in (?P<city>\w.*?), (?P<state>\w.*)" wiki = """There are several Buddhist universities in the United States. Some of these have existed for decades and are accredited. Others are relatively new and are either in the process of being accredited or else have no formal accreditation. The list includes: • Dhammakaya Open University – located in Azusa, California, • Dharmakirti College – located in Tucson, Arizona • Dharma Realm Buddhist University – located in Ukiah, California • Ewam Buddhist Institute – located in Arlee, Montana • Naropa University - located in Boulder, Colorado • Institute of Buddhist Studies – located in Berkeley, California • Maitripa College – located in Portland, Oregon • Soka University of America – located in Aliso Viejo, California • University of the West – located in Rosemead, California • Won Institute of Graduate Studies – located in Glenside, Pennsylvania""" for item in re.finditer(pattern, wiki, re.M): print(item.groupdict()) Output {'title': 'Dhammakaya Open University', 'city': 'Azusa', 'state': 'California,'} {'title': 'Dharmakirti College', 'city': 'Tucson', 'state': 'Arizona'} {'title': 'Dharma Realm Buddhist University', 'city': 'Ukiah', 'state': 'California'} {'title': 'Ewam Buddhist Institute', 'city': 'Arlee', 'state': 'Montana'} {'title': 'Naropa University', 'city': 'Boulder', 'state': 'Colorado'} {'title': 'Institute of Buddhist Studies', 'city': 'Berkeley', 'state': 'California'} {'title': 'Maitripa College', 'city': 'Portland', 'state': 'Oregon'} {'title': 'Soka University of America', 'city': 'Aliso Viejo', 'state': 'California'} {'title': 'University of the West', 'city': 'Rosemead', 'state': 'California'} {'title': 'Won Institute of Graduate Studies', 'city': 'Glenside', 
'state': 'Pennsylvania'}
pattern matching in Python with regex problem
I am trying to learn pattern matching with regex, the course is through coursera and hasn't been updated since python 3 came out so the instructors code is not working correctly. Here's what I have so far: # example Wiki data wiki= """There are several Buddhist universities in the United States. Some of these have existed for decades and are accredited. Others are relatively new and are either in the process of being accredited or else have no formal accreditation. The list includes: • Dhammakaya Open University – located in Azusa, California, • Dharmakirti College – located in Tucson, Arizona • Dharma Realm Buddhist University – located in Ukiah, California • Ewam Buddhist Institute – located in Arlee, Montana • Naropa University - located in Boulder, Colorado • Institute of Buddhist Studies – located in Berkeley, California • Maitripa College – located in Portland, Oregon • Soka University of America – located in Aliso Viejo, California • University of the West – located in Rosemead, California • Won Institute of Graduate Studies – located in Glenside, Pennsylvania""" pattern=re.compile( r'(?P<title>.*)' # the university title r'(-\ located\ in\ )' #an indicator of the location r'(?P<city>\w*)' # city the university is in r'(,\ )' #seperator for the state r'(?P<state>\w.*)') #the state the city is in) for item in re.finditer(pattern, wiki, re.VERBOSE): print(item.groupdict()) Output: Traceback (most recent call last): File "/Users/r..., line 194, in <module> for item in re.finditer(pattern, wiki, re.VERBOSE): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/re/__init__.py", line 223, in finditer return _compile(pattern, flags).finditer(string) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/re/__init__.py", line 282, in _compile raise ValueError( ValueError: cannot process flags argument with a compiled pattern I only want a dictionary with the 
university name, the city and the state. If I run it without re.VERBOSE, only one school shows up and none of the rest are there. I am somewhat new to python and don't know what to do about these errors
[ "In fact, for current versions of Python, you do not need to add re.VERBOSE at all. If you do\nfor item in re.finditer(pattern, wiki): \n print(item.groupdict())\n\nthe program will print\n{'title': '• Naropa University ', 'city': 'Boulder', 'state': 'Colorado '}\n\nusing Python 3.10.\nBy the way, the program only outputs one school because the other schools use a long hyphen – instead or a short one, -. Making all schools use the same, and changing your pattern accordingly, should give you the whole list.\n", "Thanks to JustLearning, my problem is solved. Here is the code I ended up using. I can't believe it was a long hyphen instead of a short one. And now I know I dont need to use the re.VERBOSE. Thank you again\npattern =re.compile(\nr'(?P.)'\nr'(-\\ located\\ in\\ )'\nr'(?P.)'\nr'(,\\ )'\nr'(?P.*)')\n", "In your example data you are using 2 types of hyphens.\n\nEn Dash\nHyphen-Minus\n\nIf you want to match both you can make use of a character class [–-]\nApart from that, using .* repeats 0+ times any character (can match empty strings) and will first match until the end of the line and will allow backtracking to match the rest of the pattern.\nWhat you could do it make the pattern a bit more precise starting each group matching at least a word character.\nIf you are only interested in the groups title, city and state you don't need the other 2 capture groups.\nNote that if you want to match a space that you don't have to escape it.\n^\\W*(?P<title>\\w.*?) [–-] located in (?P<city>\\w.*?), (?P<state>\\w.*)\n\n\n^ Start of string\n\\W* Match optional non word characters\n(?P<title>\\w.*?) Match a word character, followed by matching as least as possible chars\n [–-] Match any of the dashes with a space to the left and right\nlocated in Match literally\n(?P<city>\\w.*?) 
Match a word character followed by matching as least as possible chars\n, Match literally\n(?P<state>\\w.*) Match a word character followed by the rest of the line\n\nRegex demo | Python demo\nExample\nimport re\n\npattern = r\"^\\W*(?P<title>\\w.*?) [–-] located in (?P<city>\\w.*?), (?P<state>\\w.*)\"\n\nwiki = \"\"\"There are several Buddhist universities in the United States. Some of these have existed for decades and are accredited. Others are relatively new and are either in the process of being accredited or else have no formal accreditation. The list includes:\n• Dhammakaya Open University – located in Azusa, California,\n• Dharmakirti College – located in Tucson, Arizona\n• Dharma Realm Buddhist University – located in Ukiah, California\n• Ewam Buddhist Institute – located in Arlee, Montana\n• Naropa University - located in Boulder, Colorado\n• Institute of Buddhist Studies – located in Berkeley, California\n• Maitripa College – located in Portland, Oregon\n• Soka University of America – located in Aliso Viejo, California\n• University of the West – located in Rosemead, California\n• Won Institute of Graduate Studies – located in Glenside, Pennsylvania\"\"\"\n\nfor item in re.finditer(pattern, wiki, re.M):\n print(item.groupdict())\n\nOutput\n{'title': 'Dhammakaya Open University', 'city': 'Azusa', 'state': 'California,'}\n{'title': 'Dharmakirti College', 'city': 'Tucson', 'state': 'Arizona'}\n{'title': 'Dharma Realm Buddhist University', 'city': 'Ukiah', 'state': 'California'}\n{'title': 'Ewam Buddhist Institute', 'city': 'Arlee', 'state': 'Montana'}\n{'title': 'Naropa University', 'city': 'Boulder', 'state': 'Colorado'}\n{'title': 'Institute of Buddhist Studies', 'city': 'Berkeley', 'state': 'California'}\n{'title': 'Maitripa College', 'city': 'Portland', 'state': 'Oregon'}\n{'title': 'Soka University of America', 'city': 'Aliso Viejo', 'state': 'California'}\n{'title': 'University of the West', 'city': 'Rosemead', 'state': 'California'}\n{'title': 'Won 
Institute of Graduate Studies', 'city': 'Glenside', 'state': 'Pennsylvania'}\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "pattern_matching", "python", "regex" ]
stackoverflow_0074670737_pattern_matching_python_regex.txt
Q: regex matched values convert to float/integers Consider this example: import re string = "1-3-a" a, b, c = re.match("(\d+)-(\d+)-(\w+)", string).groups() print(a + b) This will print: '13'. However, I want to use these values as digits (integers or floats), while keeping variable c as a string. Of course I can do a = int(a) etc. but I think there must be a more convenient way to do this (especially when you are matching way more variables). Unfortunately I cannot find anything about this, originally I thought that regex will deal with this automatically as I am saying it must be a digit. EDIT: I think this is different from the supposed duplicate question as I am trying to match multiple parts of the string into multiple variables. A: Regex will not do this natively, that's simply not its job. One way you could achieve it (if you wanted more of a "one-line" solution) is to use the map function to apply the int() function to every element in the groups tuple. import re string = "1-3" a, b = map(int, re.match("(\d+)-(\d+)", string).groups()) print(a + b)
regex matched values convert to float/integers
Consider this example: import re string = "1-3-a" a, b, c = re.match("(\d+)-(\d+)-(\w+)", string).groups() print(a + b) This will print: '13'. However, I want to use these values as digits (integers or floats), while keeping variable c as a string. Of course I can do a = int(a) etc. but I think there must be a more convenient way to do this (especially when you are matching way more variables). Unfortunately I cannot find anything about this, originally I thought that regex will deal with this automatically as I am saying it must be a digit. EDIT: I think this is different from the supposed duplicate question as I am trying to match multiple parts of the string into multiple variables.
[ "Regex will not do this natively, that's simply not its job. One way you could achieve it (if you wanted more of a \"one-line\" solution) is to use the map function to apply the int() function to every element in the groups tuple.\nimport re\nstring = \"1-3\"\na, b = map(int, re.match(\"(\\d+)-(\\d+)\", string).groups())\nprint(a + b)\n\n" ]
[ 1 ]
[]
[]
[ "match", "python", "regex" ]
stackoverflow_0074674564_match_python_regex.txt
Q: Can I read a txt file every few seconds and display its contents through html? I have a txt file that updates with new data every few seconds. It is stored and generated locally on a raspberry pi, which will also act as the server. I want its contents to be added to and html code for displaying. It should update without manual reloading of the page. Is there a way to do this? Maybe with AJAX, PHP, or something along those lines? Don't have to find/write any code for me, as I understand that it would be rather time consuming. just point me in the right direction so I can learn how to do it. A: You could do this using an API endpoint and an ajax call on the client. I sketched up some code for you. I made the endpoint url /url/to/api.php - you would change this to match the server setup on the pi. You would also need to host a HTML file that has some javascript code that polls your api every few seconds. I set it to do this every 5th second, using setInterval. <script> // The client code (javascript) - should be placed right before the </body> tag (async () => { setInterval(async () => { const data = await fetch("/url/to/api.php").then(response => response.text()); document.getElementById("#htmlElementWithThisId").innerHTML(data); }, 5000); })() </script> // In the html you have to have an element with the id: "htmlElementWithThisId" - this is where the content will be displayed <div id="htmlElementWithThisId"></div> Finally in the api.php file you would read your file and "echo" the contents of your file on every request A: you can use jQuery,$.ajax ,$.post or $.get or can also use XMLHttpRequest for javascript (old but gold) and for php use readFile (server-side no need for API) little story may help once i used arduino with wifi module i collected the data using the arduino, after that i passed it to esp8266 (the wifi modle) and i posted to my site using GET methode like http://mySite.lo/?firstVar=myFirstVar&secondVar=mySecondVar and the server took the GET data 
from the URL update: the refresh of page for php it's header("refresh: 3;") for js write setInterval(location.reload(),3000)
Can I read a txt file every few seconds and display its contents through html?
I have a txt file that updates with new data every few seconds. It is stored and generated locally on a raspberry pi, which will also act as the server. I want its contents to be added to and html code for displaying. It should update without manual reloading of the page. Is there a way to do this? Maybe with AJAX, PHP, or something along those lines? Don't have to find/write any code for me, as I understand that it would be rather time consuming. just point me in the right direction so I can learn how to do it.
[ "You could do this using an API endpoint and an ajax call on the client.\nI sketched up some code for you.\nI made the endpoint url /url/to/api.php - you would change this to match the server setup on the pi.\nYou would also need to host a HTML file that has some javascript code that polls your api every few seconds. I set it to do this every 5th second, using setInterval.\n<script>\n// The client code (javascript) - should be placed right before the </body> tag\n(async () => {\n setInterval(async () => {\n const data = await fetch(\"/url/to/api.php\").then(response => response.text());\n document.getElementById(\"#htmlElementWithThisId\").innerHTML(data);\n }, 5000);\n})()\n</script>\n\n// In the html you have to have an element with the id: \"htmlElementWithThisId\" - this is where the content will be displayed\n\n<div id=\"htmlElementWithThisId\"></div>\n\n\nFinally in the api.php file you would read your file and \"echo\" the contents of your file on every request\n", "you can use jQuery,$.ajax ,$.post or $.get\nor can also use XMLHttpRequest for javascript (old but gold)\nand for php use readFile (server-side no need for API)\nlittle story may help\nonce i used arduino with wifi module\ni collected the data using the arduino, after that i passed it to esp8266 (the wifi modle) and i posted to my site using GET methode\nlike\nhttp://mySite.lo/?firstVar=myFirstVar&secondVar=mySecondVar\nand the server took the GET data from the URL\nupdate:\nthe refresh of page\nfor php it's header(\"refresh: 3;\")\nfor js write setInterval(location.reload(),3000)\n" ]
[ 1, 1 ]
[]
[]
[ "ajax", "html", "php", "txt" ]
stackoverflow_0074674421_ajax_html_php_txt.txt
Q: Quickfilter should add value to list of filters I have a list of custom filters for a basic list in react-admin like this: const ClientListsFilter = (props: FilterProps): JSX.Element => { return ( <Filter {...props}> <TextInput label="First Name" source="firstName" resettable /> <TextInput label="Last Name" source="lastName" resettable /> <TextInput label="E-Mail" source="email" resettable /> <QuickFilter label="Has Event Accepted" source="hasEventAccepted" defaultValue={true} /> </Filter> ) } When I selected now the QuickFilter "Has Event Accepted" it adds the following to the query: hasEventAccepted%22%3Atrue which is to be expected. Now, I want to add multiple such "Has Event XYZ" QuickFilters, but instead of having a pair for each in the query it would make sense to just have a list like hasEvents=[1,2,3] Is there a way to achieve this in react-admin? How could I combine multiple values so that a QuickFilter will add directly multiple such events? A: You could use an array as the value for the QuickFilter, and then use the includes operator in your filter query to check if the array contains a given value. For example, if you have a list of QuickFilters with values 1, 2, and 3, you could combine these values into an array and pass it to the QuickFilter component: const events = [1, 2, 3]; <QuickFilter label="Has Events" source="hasEvents" defaultValue={events} /> Then, in your filter query, you could use the includes operator to check if the hasEvents field contains a given value: const filters = [ { field: "hasEvents", operator: "includes", value: 1 } ]; This would return all records where the hasEvents field contains the value 1.
Quickfilter should add value to list of filters
I have a list of custom filters for a basic list in react-admin like this: const ClientListsFilter = (props: FilterProps): JSX.Element => { return ( <Filter {...props}> <TextInput label="First Name" source="firstName" resettable /> <TextInput label="Last Name" source="lastName" resettable /> <TextInput label="E-Mail" source="email" resettable /> <QuickFilter label="Has Event Accepted" source="hasEventAccepted" defaultValue={true} /> </Filter> ) } When I selected now the QuickFilter "Has Event Accepted" it adds the following to the query: hasEventAccepted%22%3Atrue which is to be expected. Now, I want to add multiple such "Has Event XYZ" QuickFilters, but instead of having a pair for each in the query it would make sense to just have a list like hasEvents=[1,2,3] Is there a way to achieve this in react-admin? How could I combine multiple values so that a QuickFilter will add directly multiple such events?
[ "You could use an array as the value for the QuickFilter, and then use the includes operator in your filter query to check if the array contains a given value.\nFor example, if you have a list of QuickFilters with values 1, 2, and 3, you could combine these values into an array and pass it to the QuickFilter component:\nconst events = [1, 2, 3];\n\n<QuickFilter label=\"Has Events\" source=\"hasEvents\" defaultValue={events} />\n\nThen, in your filter query, you could use the includes operator to check if the hasEvents field contains a given value:\nconst filters = [\n {\n field: \"hasEvents\",\n operator: \"includes\",\n value: 1\n }\n];\n\nThis would return all records where the hasEvents field contains the value 1.\n" ]
[ 0 ]
[]
[]
[ "javascript", "query_parameters", "react_admin", "reactjs", "typescript" ]
stackoverflow_0074666293_javascript_query_parameters_react_admin_reactjs_typescript.txt
Q: Is there any ORM which supports cloud database? Will prisma supports cloud database? I was looking for an orm which supports cloud database. A: Yes, there are several ORM libraries that support cloud databases. One is Prisma. Prisma is a modern ORM that provides a simple and powerful query language, enabling developers to easily access and manipulate data in cloud databases. Prisma supports MySQL, PostgreSQL, SQLite, MongoDB, and more. Another one Objection.js. Objection.js is an ORM for Node.js that is built on top of the SQL query builder Knex.js. It provides a simple and intuitive API. You can also use Sequelize, TypeORM, Waterline, and ActiveRecord. Choose wisely depends on your needs.
Is there any ORM which supports cloud database?
Will prisma supports cloud database? I was looking for an orm which supports cloud database.
[ "Yes, there are several ORM libraries that support cloud databases. One is Prisma. Prisma is a modern ORM that provides a simple and powerful query language, enabling developers to easily access and manipulate data in cloud databases. Prisma supports MySQL, PostgreSQL, SQLite, MongoDB, and more.\nAnother one Objection.js. Objection.js is an ORM for Node.js that is built on top of the SQL query builder Knex.js. It provides a simple and intuitive API.\nYou can also use Sequelize, TypeORM, Waterline, and ActiveRecord. Choose wisely depends on your needs.\n" ]
[ 0 ]
[]
[]
[ "orm", "prisma", "reactjs" ]
stackoverflow_0074674577_orm_prisma_reactjs.txt
Q: How do I reconvert JSON into custom object using TypeToken? Basically, I have an object of the type: ArrayList<Events> Which I converted into JSON using gson to save it in my SharedPreferences. Now when I wanted to reconvert the JSON formatted data into my custom object structure, the application just stops responding (debugger says "application is running", no response after). This is the line: Type eventType = new TypeToken<ArrayList<Events>>() {}.getType(); events = gson.fromJson(eventString,eventType); A: There are a few potential issues that could be causing this. One of them is the JSON string is not properly formatted, which would cause Gson to be unable to parse it. Another possibility is that the Events class is not properly set up to be used with Gson. For instance, if the Events class does not have a default constructor (a constructor with no arguments), then Gson will not be able to create instances of that class. One way to troubleshoot this issue would be to log the JSON string before conversion and verify it is properly formatted and contains the expected data. Also try printing out the eventType variable to verify that it is correct. Additionally, you can use Gson's built-in JsonParser class to parse the JSON string and create a JsonElement object, which you can then use to manually construct a list of Events objects. This can help you isolate where the issue is occurring and determine how to fix it. 
Usage of JsonParser to parse the JSON string and manually construct a list of Events objects: String eventString = ...; // JSON string you want to parse JsonElement jsonElement = new JsonParser().parse(eventString); // Verify that the JSON string was parsed successfully if (jsonElement.isJsonArray()) { // Get the JSON array from the JsonElement JsonArray jsonArray = jsonElement.getAsJsonArray(); // Create an empty ArrayList of Events ArrayList<Events> events = new ArrayList<>(); // Iterate over the JSON array and manually construct // a list of Events objects for (JsonElement element : jsonArray) { // Verify that each element in the array is an object if (element.isJsonObject()) { JsonObject jsonObject = element.getAsJsonObject(); Events event = new Events(); // Set the fields of the Events object using the // data from the JSON object event.setName(jsonObject.get("name").getAsString()); event.setStartTime(jsonObject.get("startTime").getAsLong()); event.setEndTime(jsonObject.get("endTime").getAsLong()); // Add the Events object to the list events.add(event); } } }
How do I reconvert JSON into custom object using TypeToken?
Basically, I have an object of the type: ArrayList<Events> Which I converted into JSON using gson to save it in my SharedPreferences. Now when I wanted to reconvert the JSON formatted data into my custom object structure, the application just stops responding (debugger says "application is running", no response after). This is the line: Type eventType = new TypeToken<ArrayList<Events>>() {}.getType(); events = gson.fromJson(eventString,eventType);
[ "There are a few potential issues that could be causing this. One of them is the JSON string is not properly formatted, which would cause Gson to be unable to parse it. Another possibility is that the Events class is not properly set up to be used with Gson. For instance, if the Events class does not have a default constructor (a constructor with no arguments), then Gson will not be able to create instances of that class.\nOne way to troubleshoot this issue would be to log the JSON string before conversion and verify it is properly formatted and contains the expected data. Also try printing out the eventType variable to verify that it is correct.\nAdditionally, you can use Gson's built-in JsonParser class to parse the JSON string and create a JsonElement object, which you can then use to manually construct a list of Events objects. This can help you isolate where the issue is occurring and determine how to fix it.\nUsage of JsonParser to parse the JSON string and manually construct a list of Events objects:\nString eventString = ...; // JSON string you want to parse\nJsonElement jsonElement = new JsonParser().parse(eventString);\n// Verify that the JSON string was parsed successfully\nif (jsonElement.isJsonArray()) {\n // Get the JSON array from the JsonElement\n JsonArray jsonArray = jsonElement.getAsJsonArray();\n\n // Create an empty ArrayList of Events\n ArrayList<Events> events = new ArrayList<>();\n\n // Iterate over the JSON array and manually construct\n // a list of Events objects\n for (JsonElement element : jsonArray) {\n // Verify that each element in the array is an object\n if (element.isJsonObject()) {\n JsonObject jsonObject = element.getAsJsonObject();\n Events event = new Events();\n\n // Set the fields of the Events object using the\n // data from the JSON object\n event.setName(jsonObject.get(\"name\").getAsString());\n event.setStartTime(jsonObject.get(\"startTime\").getAsLong());\n 
event.setEndTime(jsonObject.get(\"endTime\").getAsLong());\n\n // Add the Events object to the list\n events.add(event);\n }\n }\n}\n\n" ]
[ 1 ]
[]
[]
[ "android", "gson", "java", "json", "serialization" ]
stackoverflow_0074674418_android_gson_java_json_serialization.txt
Q: Minio: found backend type fs, expected xl or xl-single I try to upgrade minio version in my docker commpose(previously I used image: minio/minio:RELEASE.2020-06-22T03-12-50Z and it was working ) For now I have following docker-compose service: version: '3.6' services: minio: container_name: minio image: minio/minio:RELEASE.2022-11-17T23-20-09Z.fips volumes: - minio-data:/data ports: - 9000:9000 environment: - MINIO_ROOT_USER=minio - MINIO_ROOT_PASSWORD=minio123 command: server /data healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 When I try to start(docker-compose up -d) I see the following error in the minio container log: 2022-11-25 11:40:56 ERROR Unable to use the drive /data: Drive /data: found backend type fs, expected xl or xl-single - to migrate to a supported backend visit https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html: Invalid arguments specified I've googled the following article https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html But I still don't understand what shoud I change in my compose file to make it working. A: It is not a solution but workaround how to use a fresh version: minio: container_name: minio image: bitnami/minio:2022.11.17-debian-11-r0 volumes: - minio-data:/data ports: - 9000:9000 - 9001:9001 environment: - MINIO_ROOT_USER=minio - MINIO_ROOT_PASSWORD=minio123 - MINIO_DEFAULT_BUCKETS=mybucket1,mybucket2 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s A: looks like you need migrate data/fs in your volum to be used in new version of minio so you need to run steps from https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html In your compose you need to add volumes: minio-data: driver: local
Minio: found backend type fs, expected xl or xl-single
I try to upgrade minio version in my docker commpose(previously I used image: minio/minio:RELEASE.2020-06-22T03-12-50Z and it was working ) For now I have following docker-compose service: version: '3.6' services: minio: container_name: minio image: minio/minio:RELEASE.2022-11-17T23-20-09Z.fips volumes: - minio-data:/data ports: - 9000:9000 environment: - MINIO_ROOT_USER=minio - MINIO_ROOT_PASSWORD=minio123 command: server /data healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 When I try to start(docker-compose up -d) I see the following error in the minio container log: 2022-11-25 11:40:56 ERROR Unable to use the drive /data: Drive /data: found backend type fs, expected xl or xl-single - to migrate to a supported backend visit https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html: Invalid arguments specified I've googled the following article https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html But I still don't understand what shoud I change in my compose file to make it working.
[ "It is not a solution but workaround how to use a fresh version:\n minio:\n container_name: minio\n image: bitnami/minio:2022.11.17-debian-11-r0\n volumes:\n - minio-data:/data\n ports:\n - 9000:9000\n - 9001:9001\n environment:\n - MINIO_ROOT_USER=minio\n - MINIO_ROOT_PASSWORD=minio123\n - MINIO_DEFAULT_BUCKETS=mybucket1,mybucket2\n healthcheck:\n test: [\"CMD\", \"curl\", \"-f\", \"http://localhost:9000/minio/health/live\"]\n interval: 30s\n timeout: 20s\n\n", "looks like you need migrate data/fs in your volum to be used in new version of minio\nso you need to run steps from\nhttps://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html\nIn your compose you need to add\nvolumes:\n minio-data:\n driver: local\n\n" ]
[ 1, 0 ]
[]
[]
[ "docker", "docker_compose", "minio" ]
stackoverflow_0074570195_docker_docker_compose_minio.txt
Q: I want all my routes to work but for some reasons some are working and others are giving me 313 error I am using an API for my weather app Here's my code const express = require('express'); const cors = require('cors') const request = require('request'); const PORT = 3060; const app = express() app.use(cors()); app.use(express.json()); The starting two endpoints are working fine (weather and location). They are sending the JSON to the web page app.get('/weather',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.condition.text) } }); }) app.get('/location',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.location.name) } }); }) Now the rest are keep giving me errors 313(Invalid Store): {"location":{"name":"Nanital","region":"Uttarakhand","country":"India","lat":29.38,"lon":79.45,"tz_id":"Asia/Kolkata","localtime_epoch":1670136364,"localtime":"2022-12-04 12:16"},"current":{"last_updated_epoch":1670136300,"last_updated":"2022-12-04 12:15","temp_c":25.2,"temp_f":77.4,"is_day":1,"condition":{"text":"Sunny","icon":"//cdn.weatherapi.com/weather/64x64/day/113.png","code":1000},"wind_mph":2.2,"wind_kph":3.6,"wind_degree":229,"wind_dir":"SW","pressure_mb":1014.0,"pressure_in":29.95,"precip_mm":0.0,"precip_in":0.0,"humidity":30,"cloud":0,"feelslike_c":24.8,"feelslike_f":76.6,"vis_km":10.0,"vis_miles":6.0,"uv":7.0,"gust_mph":1.3,"gust_kph":2.2}} express deprecated res.send(status): Use res.sendStatus(status) instead Server.js:61:21 node:_http_server:313 throw new 
ERR_HTTP_INVALID_STATUS_CODE(originalStatusCode); ^ RangeError [ERR_HTTP_INVALID_STATUS_CODE]: Invalid status code: 30 at new NodeError (node:internal/errors:393:5) at ServerResponse.writeHead (node:_http_server:313:11) at ServerResponse._implicitHeader (node:_http_server:304:8) at ServerResponse.end (node:_http_outgoing:993:10) at ServerResponse.send (E:\weatherapp\node_modules\express\lib\response.js:232:10) at Request._callback (E:\weatherapp\Server.js:61:21) at self.callback (E:\weatherapp\node_modules\request\request.js:185:22) at Request.emit (node:events:513:28) at Request.<anonymous> (E:\weatherapp\node_modules\request\request.js:1154:10) at Request.emit (node:events:513:28) { code: 'ERR_HTTP_INVALID_STATUS_CODE' } Node.js v18.10.0 [nodemon] app crashed - waiting for file changes before starting... I re-checked my JSON path several times and they are correct but still the problem is same app.get('/humidity',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.humidity) } }); }) app.get('/temprature',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.temp_c) } }); }) app.get('/wind',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.wind_kph) } }); }) app.listen( PORT , () => console.log(`This is Listing to port ${PORT}`) ) A: res.send() 
consider int value as status code. Though try passing value through strings. app.get('/wind',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ let windV = "The wind speed is : "+data.current.wind_kph; res.send(windV); } }); })
I want all my routes to work but for some reasons some are working and others are giving me 313 error
I am using an API for my weather app Here's my code const express = require('express'); const cors = require('cors') const request = require('request'); const PORT = 3060; const app = express() app.use(cors()); app.use(express.json()); The starting two endpoints are working fine (weather and location). They are sending the JSON to the web page app.get('/weather',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.condition.text) } }); }) app.get('/location',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.location.name) } }); }) Now the rest are keep giving me errors 313(Invalid Store): {"location":{"name":"Nanital","region":"Uttarakhand","country":"India","lat":29.38,"lon":79.45,"tz_id":"Asia/Kolkata","localtime_epoch":1670136364,"localtime":"2022-12-04 12:16"},"current":{"last_updated_epoch":1670136300,"last_updated":"2022-12-04 12:15","temp_c":25.2,"temp_f":77.4,"is_day":1,"condition":{"text":"Sunny","icon":"//cdn.weatherapi.com/weather/64x64/day/113.png","code":1000},"wind_mph":2.2,"wind_kph":3.6,"wind_degree":229,"wind_dir":"SW","pressure_mb":1014.0,"pressure_in":29.95,"precip_mm":0.0,"precip_in":0.0,"humidity":30,"cloud":0,"feelslike_c":24.8,"feelslike_f":76.6,"vis_km":10.0,"vis_miles":6.0,"uv":7.0,"gust_mph":1.3,"gust_kph":2.2}} express deprecated res.send(status): Use res.sendStatus(status) instead Server.js:61:21 node:_http_server:313 throw new ERR_HTTP_INVALID_STATUS_CODE(originalStatusCode); ^ RangeError [ERR_HTTP_INVALID_STATUS_CODE]: Invalid status code: 30 at new NodeError 
(node:internal/errors:393:5) at ServerResponse.writeHead (node:_http_server:313:11) at ServerResponse._implicitHeader (node:_http_server:304:8) at ServerResponse.end (node:_http_outgoing:993:10) at ServerResponse.send (E:\weatherapp\node_modules\express\lib\response.js:232:10) at Request._callback (E:\weatherapp\Server.js:61:21) at self.callback (E:\weatherapp\node_modules\request\request.js:185:22) at Request.emit (node:events:513:28) at Request.<anonymous> (E:\weatherapp\node_modules\request\request.js:1154:10) at Request.emit (node:events:513:28) { code: 'ERR_HTTP_INVALID_STATUS_CODE' } Node.js v18.10.0 [nodemon] app crashed - waiting for file changes before starting... I re-checked my JSON path several times and they are correct but still the problem is same app.get('/humidity',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.humidity) } }); }) app.get('/temprature',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}&q=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.temp_c) } }); }) app.get('/wind',(req,res) => { let city = req.query.city; const request = require('request'); request(`http://api.weatherapi.com/v1/current.json?key={myKEy}=${city}&aqi=no`, function (error, response, body) { console.log(body) let data = JSON.parse(body); if(response.statusCode === 200){ res.send(data.current.wind_kph) } }); }) app.listen( PORT , () => console.log(`This is Listing to port ${PORT}`) )
[ "res.send() consider int value as status code. Though try passing value through strings.\n\n\napp.get('/wind',(req,res) => {\n let city = req.query.city;\n const request = require('request');\nrequest(`http://api.weatherapi.com/v1/current.json?key={myKEy}=${city}&aqi=no`,\n function (error, response, body) {\n console.log(body)\n let data = JSON.parse(body);\n if(response.statusCode === 200){\n let windV = \"The wind speed is : \"+data.current.wind_kph;\n res.send(windV);\n }\n});\n}) \n\n\n\n" ]
[ 0 ]
[]
[]
[ "express", "javascript", "node.js", "reactjs" ]
stackoverflow_0074673500_express_javascript_node.js_reactjs.txt
Q: Getting segmenatation fault even though i freed memory #include <stdio.h> #include <stdlib.h> #include <stdint.h> typedef uint8_t BYTE; int main(int argc, char *argv[]) { // Only allow 2 command line arguments if (argc != 2) { printf("Usage: ./recover FILENAME\n"); return 1; } // Open the file FILE *input_file = fopen(argv[1], "r"); // Check if the input_file file is a valid ifle if (input_file == NULL) { printf("No existing file"); return 1; } // Make a buffer for storing 512 bytes, !unsigned because our values MUSTN'T be negative! unsigned char buffer[512]; // Count the number of images we found int image_count = 0; // Make a pointer for recovered images FILE *output_file = NULL; // Use malloc for memory allocation char *filename = malloc(8 * sizeof(char)); // Read the blocks of our array while (fread(buffer, sizeof(char), 512, input_file)) { // Check for JPEG header hex values if (buffer[0] == 0xff && buffer[1] == 0xd8 && buffer[2] == 0xff && (buffer[3] & 0xf0) == 0xe0 ) { // Set the names of the JPEG files sprintf(filename, "%03i.jpg", image_count); // Open the file we will write to output_file = fopen(filename, "w"); // Count the number of images we found image_count++; } // Check validity of input output_file input if (output_file != NULL) { fwrite(buffer, sizeof(char), 512, output_file); } // Free the memory becuase we used malloc (to prevent leaking/segmentation fault) free(filename); // Close both the input and output files fclose(output_file); fclose(input_file); return 0; } } I am trying to solve this CS50 problem set and i don't get why I'm getting segmentation fault even though i freed memory. I obviously did something wrong, but i can't wrap my head around what exactly because I am only a begginer in c, especially with all this malloc stuff. Also, I can compile my code, it's only that I get an error when running the program. A: You're freeing inside the loop. i.e. you allocate once but try to free it several times. You want to free it just once. 
Put it outside the loop. OTOH, since you know the filenames are only 7 bytes long, you might as well use a char array instead of mallocing. char filename[256]; Then use snprintf: snprintf(filename, sizeof filename, "%03i.jpg", image_count);
Getting segmenatation fault even though i freed memory
#include <stdio.h> #include <stdlib.h> #include <stdint.h> typedef uint8_t BYTE; int main(int argc, char *argv[]) { // Only allow 2 command line arguments if (argc != 2) { printf("Usage: ./recover FILENAME\n"); return 1; } // Open the file FILE *input_file = fopen(argv[1], "r"); // Check if the input_file file is a valid ifle if (input_file == NULL) { printf("No existing file"); return 1; } // Make a buffer for storing 512 bytes, !unsigned because our values MUSTN'T be negative! unsigned char buffer[512]; // Count the number of images we found int image_count = 0; // Make a pointer for recovered images FILE *output_file = NULL; // Use malloc for memory allocation char *filename = malloc(8 * sizeof(char)); // Read the blocks of our array while (fread(buffer, sizeof(char), 512, input_file)) { // Check for JPEG header hex values if (buffer[0] == 0xff && buffer[1] == 0xd8 && buffer[2] == 0xff && (buffer[3] & 0xf0) == 0xe0 ) { // Set the names of the JPEG files sprintf(filename, "%03i.jpg", image_count); // Open the file we will write to output_file = fopen(filename, "w"); // Count the number of images we found image_count++; } // Check validity of input output_file input if (output_file != NULL) { fwrite(buffer, sizeof(char), 512, output_file); } // Free the memory becuase we used malloc (to prevent leaking/segmentation fault) free(filename); // Close both the input and output files fclose(output_file); fclose(input_file); return 0; } } I am trying to solve this CS50 problem set and i don't get why I'm getting segmentation fault even though i freed memory. I obviously did something wrong, but i can't wrap my head around what exactly because I am only a begginer in c, especially with all this malloc stuff. Also, I can compile my code, it's only that I get an error when running the program.
[ "You're freeing inside the loop. i.e. you allocate once but try to free it several times.\nYou want to free it just once. Put it outside the loop.\nOTOH, since you know the filenames are only 7 bytes long, you might as well use a char array instead of mallocing.\nchar filename[256];\n\nThen use snprintf:\nsnprintf(filename, sizeof filename, \"%03i.jpg\", image_count);\n\n" ]
[ 1 ]
[]
[]
[ "c", "cs50" ]
stackoverflow_0074674597_c_cs50.txt
Q: react app is successfully created, but npm start throws an error I installed the latest nodejs 19.2.0 on my windows 11 OS rather than the recommended for most users 18.12.1 npx create-react-app my-first-app works just fine, it creates all the files and folders without any errors, shows happy hacking message as well, recommends to use npm start command. I go inside my-first-app folder, go npm start and I get a module not found error...like this picture Error Message Screenshot In youtube tutorials, there is no any error in their pc. npm start runs just as easily as npx create-react-app <anyappname>. A: the create-react-app script is trying to import a module called react-scripts which is not installed in your project. try running npm install react-scripts inside your project folder. This will install the missing module and allow you to run the npm start command successfully. If you still encounter an error after running this command, you may need to delete the node_modules folder and run npm install again to reinstall all of the dependencies for your project. A: Try this first: npm install react-scripts If the error wasn't solved, then try this: rm -rf node_modules npm cache clean -f npm install npm install react-scripts A: F:\Tutorial & Practice\Frontend development\React Projects\my-first-app. With this path to your project, I can see there is an & symbol in the Tutorial & Practice directory. Sometimes that might cause error. Try renaming the directory without any symbols or even spaces, just to be sure. Then run npm start. And if that still doesn't work delete the node_modules folder and then npm install again and see if that works.
react app is successfully created, but npm start throws an error
I installed the latest nodejs 19.2.0 on my windows 11 OS rather than the recommended for most users 18.12.1 npx create-react-app my-first-app works just fine, it creates all the files and folders without any errors, shows happy hacking message as well, recommends to use npm start command. I go inside my-first-app folder, go npm start and I get a module not found error...like this picture Error Message Screenshot In youtube tutorials, there is no any error in their pc. npm start runs just as easily as npx create-react-app <anyappname>.
[ "the create-react-app script is trying to import a module called react-scripts which is not installed in your project.\ntry running npm install react-scripts inside your project folder. This will install the missing module and allow you to run the npm start command successfully.\nIf you still encounter an error after running this command, you may need to delete the node_modules folder and run npm install again to reinstall all of the dependencies for your project.\n", "Try this first:\nnpm install react-scripts\n\nIf the error wasn't solved, then try this:\n\nrm -rf node_modules\nnpm cache clean -f\nnpm install\nnpm install react-scripts\n\n", "F:\\Tutorial & Practice\\Frontend development\\React Projects\\my-first-app. With this path to your project, I can see there is an & symbol in the Tutorial & Practice directory. Sometimes that might cause error. Try renaming the directory without any symbols or even spaces, just to be sure. Then run npm start. And if that still doesn't work delete the node_modules folder and then npm install again and see if that works.\n" ]
[ 0, 0, 0 ]
[]
[]
[ "node.js", "reactjs" ]
stackoverflow_0074669082_node.js_reactjs.txt
Q: Generate a connected line with different amplitude I'm trying to make a game like Line, but with a horizontal and not vertical wave. The problem is making that the wave continues even after changing its amplitude (I will change the frequency later). So far I have reached this part of wave: import pygame import pygame.gfxdraw import math import time DISPLAY_W, DISPLAY_H = 400, 800 clock = pygame.time.Clock() pygame.init() SCREEN = pygame.Surface((DISPLAY_W, DISPLAY_H)) GAME_DISPLAY = pygame.display.set_mode((DISPLAY_W, DISPLAY_H)) class Line(): def __init__(self): self.pointsList = [0]*800 self.listIndex = 0 def game(self): while True: clock.tick(60) SCREEN.fill((0, 0, 0)) self.listIndex += +1 self.generateWave() self.drawWave() for event in pygame.event.get(): if (event.type == pygame.QUIT): quit() pygame.display.update() GAME_DISPLAY.blit(SCREEN, (0, 0)) def drawWave(self): for Y_CORD in range(len(self.pointsList)): pygame.gfxdraw.pixel( GAME_DISPLAY, self.pointsList[Y_CORD]-55, DISPLAY_H-Y_CORD, (255, 255, 255)) pygame.gfxdraw.pixel( GAME_DISPLAY, self.pointsList[Y_CORD]-350, DISPLAY_H-Y_CORD, (255, 255, 255)) def generateWave(self): waveAmplitude = 50 waveFrequency = 1 XCord = int((DISPLAY_H/2) + waveAmplitude*math.sin( waveFrequency * ((float(0)/-DISPLAY_W)*(2*math.pi) + (time.time())))) if self.pointsList[-1] != 0: self.pointsList.pop(0) self.pointsList.append(XCord) else: self.pointsList[self.listIndex] = XCord if __name__ == "__main__": game = Line() game.game() I thought about having another function to change the amplitude, but then there would be a gap: A: One issue with your code is that you are using a variable called XCord to store the Y-coordinate of each point in the wave. This variable should be called YCord instead, since it represents the Y-coordinate of the point on the screen. Another issue is that you are using a variable called waveFrequency to control the speed of the wave. 
This variable should be called waveSpeed instead, since it controls the speed of the wave rather than its frequency. To fix the issue of the wave not continuing after changing the amplitude, you can modify the generateWave() function as follows: def generateWave(self, waveAmplitude): waveFrequency = 1 waveSpeed = 0.05 for i in range(len(self.pointsList)): YCord = int((DISPLAY_H/2) + waveAmplitude*math.sin( waveFrequency * ((float(i)/-DISPLAY_W)*(2*math.pi) + (time.time()*waveSpeed)))) if self.pointsList[i] != 0: self.pointsList[i] = YCord else: self.pointsList[i] = YCord In this updated version of the function, we loop through each point in the pointsList array and calculate its Y-coordinate using the given waveAmplitude value. We also use the waveSpeed variable to control the speed of the wave. This allows us to change the amplitude of the wave without creating a gap in the wave. You can then call this function with a desired value for waveAmplitude.
Generate a connected line with different amplitude
I'm trying to make a game like Line, but with a horizontal and not vertical wave. The problem is making that the wave continues even after changing its amplitude (I will change the frequency later). So far I have reached this part of wave: import pygame import pygame.gfxdraw import math import time DISPLAY_W, DISPLAY_H = 400, 800 clock = pygame.time.Clock() pygame.init() SCREEN = pygame.Surface((DISPLAY_W, DISPLAY_H)) GAME_DISPLAY = pygame.display.set_mode((DISPLAY_W, DISPLAY_H)) class Line(): def __init__(self): self.pointsList = [0]*800 self.listIndex = 0 def game(self): while True: clock.tick(60) SCREEN.fill((0, 0, 0)) self.listIndex += +1 self.generateWave() self.drawWave() for event in pygame.event.get(): if (event.type == pygame.QUIT): quit() pygame.display.update() GAME_DISPLAY.blit(SCREEN, (0, 0)) def drawWave(self): for Y_CORD in range(len(self.pointsList)): pygame.gfxdraw.pixel( GAME_DISPLAY, self.pointsList[Y_CORD]-55, DISPLAY_H-Y_CORD, (255, 255, 255)) pygame.gfxdraw.pixel( GAME_DISPLAY, self.pointsList[Y_CORD]-350, DISPLAY_H-Y_CORD, (255, 255, 255)) def generateWave(self): waveAmplitude = 50 waveFrequency = 1 XCord = int((DISPLAY_H/2) + waveAmplitude*math.sin( waveFrequency * ((float(0)/-DISPLAY_W)*(2*math.pi) + (time.time())))) if self.pointsList[-1] != 0: self.pointsList.pop(0) self.pointsList.append(XCord) else: self.pointsList[self.listIndex] = XCord if __name__ == "__main__": game = Line() game.game() I thought about having another function to change the amplitude, but then there would be a gap:
[ "One issue with your code is that you are using a variable called XCord to store the Y-coordinate of each point in the wave. This variable should be called YCord instead, since it represents the Y-coordinate of the point on the screen.\nAnother issue is that you are using a variable called waveFrequency to control the speed of the wave. This variable should be called waveSpeed instead, since it controls the speed of the wave rather than its frequency.\nTo fix the issue of the wave not continuing after changing the amplitude, you can modify the generateWave() function as follows:\ndef generateWave(self, waveAmplitude):\n waveFrequency = 1\n waveSpeed = 0.05\n for i in range(len(self.pointsList)):\n YCord = int((DISPLAY_H/2) + waveAmplitude*math.sin(\n waveFrequency * ((float(i)/-DISPLAY_W)*(2*math.pi) + (time.time()*waveSpeed))))\n\n if self.pointsList[i] != 0:\n self.pointsList[i] = YCord\n else:\n self.pointsList[i] = YCord\n\nIn this updated version of the function, we loop through each point in the pointsList array and calculate its Y-coordinate using the given waveAmplitude value. We also use the waveSpeed variable to control the speed of the wave. This allows us to change the amplitude of the wave without creating a gap in the wave.\nYou can then call this function with a desired value for waveAmplitude.\n" ]
[ 0 ]
[]
[]
[ "pygame", "python" ]
stackoverflow_0074649361_pygame_python.txt
Q: Angular 11 Http Interceptor within a Service I have an angular application that has a http interceptor which adds a bearer token: export class AuthorizeInterceptor implements HttpInterceptor { constructor(private authorize: AuthorizeService) { } intercept(req: HttpRequest<any>, next: HttpHandler): Observable<HttpEvent<any>> { return this.authorize.getAccessToken() .pipe(mergeMap(token => this.processRequestWithToken(token, req, next))); } // Checks if there is an access_token available in the authorize service // and adds it to the request in case it's targeted at the same origin as the // single page application. private processRequestWithToken(token: string | null, req: HttpRequest<any>, next: HttpHandler) { if (!!token && this.isSameOriginUrl(req)) { req = req.clone({ setHeaders: { Authorization: `Bearer ${token}` } }); } return next.handle(req); } private isSameOriginUrl(req: any) { // It's an absolute url with the same origin. if (req.url.startsWith(`${window.location.origin}/`)) { return true; } // It's a protocol relative url with the same origin. // For example: //www.example.com/api/Products if (req.url.startsWith(`//${window.location.host}/`)) { return true; } // It's a relative url like /api/Products if (/^\/[^\/].*/.test(req.url)) { return true; } // It's an absolute or protocol relative url that // doesn't have the same origin. 
return false; } } this works fine when I make a direct http call from within a component: constructor( http: HttpClient, @Inject('BASE_URL') baseUrl: string, private _Activatedroute:ActivatedRoute, private _CustomerService:CustomerService) { this.id = parseInt(this._Activatedroute.snapshot.paramMap.get("id")!); http.get<CustomerBase>(baseUrl + 'customer/getCustomerById?id=' + this.id).subscribe({ next: (data: CustomerBase) => { console.log(data); } }); } but I have a service that I want to call ngOnInit(): void { //using a service might not have the headers since it isn't intercepted by the auth interceptor this._CustomerService.getCustomerById(this.id!).subscribe({ but this isn't picked up by the interceptor and doesn't have the bearer token, hence it fails with a 401 Unauthorized I think I have added correctly to the app.module.ts providers: [ { provide: HTTP_INTERCEPTORS, useClass: AuthorizeInterceptor, multi: true } ], how can I ensure that http requests sent be a service are also picked up by the interceptor? A: Try to use the easier way to get token like this: @Injectable() export class AuthInterceptor implements HttpInterceptor { constructor() {} intercept( req: HttpRequest<any>, next: HttpHandler ): Observable<HttpEvent<any>> { const access_token = localStorage.getItem('access_token'); const authReq = req.clone({ setHeaders: { Authorization: 'Bearer ' + access_token } }); return next.handle(authReq); } } If it works fine, maybe your problem is defined in AppModule, AuthorizeService or CustomerService. 1. AppModule: Make sure you import HTTP_INTERCEPTOR and HttpClientModule properly: import { HTTP_INTERCEPTORS, HttpClientModule } from "@angular/common/http"; ... imports: [HttpClientModule] providers: [{ provide: HTTP_INTERCEPTORS, useClass: AuthInterceptor, multi: true, }] 2. CustomerService Make sure you are using HttpClient properly: import {HttpClient} from "@angular/common/http"; ... constructor(private http: HttpClient) {} ... 
http.get(...).pipe(); Don't use it like: import { HttpBackend, HttpClient } from '@angular/common/http'; ... constructor(private httpClient: HttpClient, handler: HttpBackend) { // if you use it in this way, you won't go through any interceptors this.http = new HttpClient(handler); }
Angular 11 Http Interceptor within a Service
I have an angular application that has a http interceptor which adds a bearer token: export class AuthorizeInterceptor implements HttpInterceptor { constructor(private authorize: AuthorizeService) { } intercept(req: HttpRequest<any>, next: HttpHandler): Observable<HttpEvent<any>> { return this.authorize.getAccessToken() .pipe(mergeMap(token => this.processRequestWithToken(token, req, next))); } // Checks if there is an access_token available in the authorize service // and adds it to the request in case it's targeted at the same origin as the // single page application. private processRequestWithToken(token: string | null, req: HttpRequest<any>, next: HttpHandler) { if (!!token && this.isSameOriginUrl(req)) { req = req.clone({ setHeaders: { Authorization: `Bearer ${token}` } }); } return next.handle(req); } private isSameOriginUrl(req: any) { // It's an absolute url with the same origin. if (req.url.startsWith(`${window.location.origin}/`)) { return true; } // It's a protocol relative url with the same origin. // For example: //www.example.com/api/Products if (req.url.startsWith(`//${window.location.host}/`)) { return true; } // It's a relative url like /api/Products if (/^\/[^\/].*/.test(req.url)) { return true; } // It's an absolute or protocol relative url that // doesn't have the same origin. 
return false; } } this works fine when I make a direct http call from within a component: constructor( http: HttpClient, @Inject('BASE_URL') baseUrl: string, private _Activatedroute:ActivatedRoute, private _CustomerService:CustomerService) { this.id = parseInt(this._Activatedroute.snapshot.paramMap.get("id")!); http.get<CustomerBase>(baseUrl + 'customer/getCustomerById?id=' + this.id).subscribe({ next: (data: CustomerBase) => { console.log(data); } }); } but I have a service that I want to call ngOnInit(): void { //using a service might not have the headers since it isn't intercepted by the auth interceptor this._CustomerService.getCustomerById(this.id!).subscribe({ but this isn't picked up by the interceptor and doesn't have the bearer token, hence it fails with a 401 Unauthorized I think I have added correctly to the app.module.ts providers: [ { provide: HTTP_INTERCEPTORS, useClass: AuthorizeInterceptor, multi: true } ], how can I ensure that http requests sent be a service are also picked up by the interceptor?
[ "Try to use the easier way to get token like this:\n@Injectable()\nexport class AuthInterceptor implements HttpInterceptor {\n constructor() {}\n\n intercept(\n req: HttpRequest<any>,\n next: HttpHandler\n ): Observable<HttpEvent<any>> {\n const access_token = localStorage.getItem('access_token');\n const authReq = req.clone({\n setHeaders: {\n Authorization: 'Bearer ' + access_token\n }\n });\n\n return next.handle(authReq);\n }\n}\n\nIf it works fine, maybe your problem is defined in AppModule, AuthorizeService or CustomerService.\n1. AppModule:\nMake sure you import HTTP_INTERCEPTOR and HttpClientModule properly:\nimport { HTTP_INTERCEPTORS, HttpClientModule } from \"@angular/common/http\";\n...\nimports: [HttpClientModule]\nproviders: [{\n provide: HTTP_INTERCEPTORS,\n useClass: AuthInterceptor,\n multi: true,\n}]\n\n2. CustomerService\nMake sure you are using HttpClient properly:\nimport {HttpClient} from \"@angular/common/http\";\n...\nconstructor(private http: HttpClient) {}\n...\nhttp.get(...).pipe();\n\nDon't use it like:\nimport { HttpBackend, HttpClient } from '@angular/common/http';\n...\nconstructor(private httpClient: HttpClient, handler: HttpBackend) {\n // if you use it in this way, you won't go through any interceptors\n this.http = new HttpClient(handler);\n}\n\n" ]
[ 0 ]
[]
[]
[ "angular" ]
stackoverflow_0074658596_angular.txt
Q: If a user grants access via a website, how to use that access on a different server? I'm trying to get some data from a user (searchconsole): the user first grants permission on a website. Then, the idea is to use that permission and retrieve the data with a python program that'll run on a different server. What is the easiest/safest way to achieve that? Should I use the same token for both servers? or is there a solution using the service account impersonation? (I'm stuck on that one) use the permission on the web server to add the service account as a searchconsole user? I tried to move the token from one server to another manually, and it works, but it seems suboptimal to use the same token for both servers. I also read the doc and all examples I could find, but didn't find my case even though it seems basic. A: Should I use the same token for both servers? Im not 100% sure what you mean by token, you can and probably should just store the refresh token from the user and then you can access their data when ever you need to. This is really how Oauth2 is supposed to work and maybe you could find a way of storing it in a database that both your fount end and backend can access. or is there a solution using the service account impersonation? (I'm stuck on that one) Service accounts should really only be used if you the developer control the account you are trying to connect to. or if you are a google workspace admin and want to control the data of everyone on your domain. impersonation can only be configured via google workspace and can only be configured to control users on the same domain. So standard google gmail users would be out. In the case of the webmaster tools api im not sure by checking the documentation that this api even supports service accounts use the permission on the web server to add the service account as a searchconsole user? 
I did just check my personal web master tools account and it appears that i have at some point in the past added a service account as a user on my account. For a service account to have access to an account it must be pre authorized. This is done as you can see by adding a user to your account. I cant remember how long ago I tested this from what i remember it did not work as the user needed to accept the authorization and there was no way to do that with a service account.
If a user grants access via a website, how to use that access on a different server?
I'm trying to get some data from a user (searchconsole): the user first grants permission on a website. Then, the idea is to use that permission and retrieve the data with a python program that'll run on a different server. What is the easiest/safest way to achieve that? Should I use the same token for both servers? or is there a solution using the service account impersonation? (I'm stuck on that one) use the permission on the web server to add the service account as a searchconsole user? I tried to move the token from one server to another manually, and it works, but it seems suboptimal to use the same token for both servers. I also read the doc and all examples I could find, but didn't find my case even though it seems basic.
[ "\nShould I use the same token for both servers?\n\nIm not 100% sure what you mean by token, you can and probably should just store the refresh token from the user and then you can access their data when ever you need to. This is really how Oauth2 is supposed to work and maybe you could find a way of storing it in a database that both your fount end and backend can access.\n\nor is there a solution using the service account impersonation? (I'm stuck on that one)\n\nService accounts should really only be used if you the developer control the account you are trying to connect to. or if you are a google workspace admin and want to control the data of everyone on your domain. impersonation can only be configured via google workspace and can only be configured to control users on the same domain. So standard google gmail users would be out.\nIn the case of the webmaster tools api im not sure by checking the documentation that this api even supports service accounts\n\nuse the permission on the web server to add the service account as a searchconsole user?\n\nI did just check my personal web master tools account and it appears that i have at some point in the past added a service account as a user on my account.\n\nFor a service account to have access to an account it must be pre authorized. This is done as you can see by adding a user to your account. I cant remember how long ago I tested this from what i remember it did not work as the user needed to accept the authorization and there was no way to do that with a service account.\n" ]
[ 0 ]
[]
[]
[ "google_api", "google_oauth", "google_search_console" ]
stackoverflow_0074673787_google_api_google_oauth_google_search_console.txt
Q: How to implement third Nelson's rule with Pandas? I am trying to implement Nelson's rules using Pandas. One of them is giving me grief, specifically number 3: Using some example data: data = pd.DataFrame({"values":[1,2,3,4,5,6,7,5,6,5,3]}) values 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 5 8 6 9 5 10 3 My first approach was to use a rolling window to check if they are in/decreasing with diff()>0 and use this to identify "hits" on the rule: (data.diff()>0).rolling(6).sum()==6 This correctly identifies the end values (1=True, 0=False): values correct /desired 0 0 0 1 0 1 2 0 1 3 0 1 4 0 1 5 0 1 6 1 1 7 0 0 8 0 0 9 0 0 10 0 0 This misses the first points (which are part of the run) because rolling is a look behind. Given this rule requires 6 points in a row, I essentially need to evaluate for a given point the 6 possible windows it can fall in and then mark it as true if it is part of any window in which the points are consecutively in/decreasing. I can think of how I could do this with some custom Python code with iterrows() or apply. I am, however keen to keep this performant, so want to limit myself to the Panda's API. How can this be achieved ? A: With the following toy dataframe (an extended version of yours): import pandas as pd df = pd.DataFrame({"values": [1, 2, 3, 4, 5, 6, 7, 5, 6, 5, 3, 11, 12, 13, 14, 15, 16, 4, 3, 8, 9, 10, 2]}) Here is one way to do it: # Find consecutive values df["check"] = (df.diff() > 0).rolling(6).sum() df["check"] = df.apply(lambda x: 1 if x["check"] >= 6 else pd.NA, axis=1) # Mark values for idx in df[df["check"] == 1].index: df.loc[idx - 5 : idx, "check"] = 1 # Set 0 for other values df = df.fillna(0) Then: print(df) # Output values check 0 1 0 1 2 1 2 3 1 3 4 1 4 5 1 5 6 1 6 7 1 7 5 0 8 6 0 9 5 0 10 3 0 11 11 1 12 12 1 13 13 1 14 14 1 15 15 1 16 16 1 17 4 0 18 3 0 19 8 0 20 9 0 21 10 0 22 2 0
How to implement third Nelson's rule with Pandas?
I am trying to implement Nelson's rules using Pandas. One of them is giving me grief, specifically number 3: Using some example data: data = pd.DataFrame({"values":[1,2,3,4,5,6,7,5,6,5,3]}) values 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 5 8 6 9 5 10 3 My first approach was to use a rolling window to check if they are in/decreasing with diff()>0 and use this to identify "hits" on the rule: (data.diff()>0).rolling(6).sum()==6 This correctly identifies the end values (1=True, 0=False): values correct /desired 0 0 0 1 0 1 2 0 1 3 0 1 4 0 1 5 0 1 6 1 1 7 0 0 8 0 0 9 0 0 10 0 0 This misses the first points (which are part of the run) because rolling is a look behind. Given this rule requires 6 points in a row, I essentially need to evaluate for a given point the 6 possible windows it can fall in and then mark it as true if it is part of any window in which the points are consecutively in/decreasing. I can think of how I could do this with some custom Python code with iterrows() or apply. I am, however keen to keep this performant, so want to limit myself to the Panda's API. How can this be achieved ?
[ "With the following toy dataframe (an extended version of yours):\nimport pandas as pd\n\n\ndf = pd.DataFrame({\"values\": [1, 2, 3, 4, 5, 6, 7, 5, 6, 5, 3, 11, 12, 13, 14, 15, 16, 4, 3, 8, 9, 10, 2]})\n\nHere is one way to do it:\n# Find consecutive values\ndf[\"check\"] = (df.diff() > 0).rolling(6).sum()\ndf[\"check\"] = df.apply(lambda x: 1 if x[\"check\"] >= 6 else pd.NA, axis=1)\n\n# Mark values\nfor idx in df[df[\"check\"] == 1].index:\n df.loc[idx - 5 : idx, \"check\"] = 1\n\n# Set 0 for other values\ndf = df.fillna(0)\n\nThen:\nprint(df)\n# Output\n values check\n0 1 0\n1 2 1\n2 3 1\n3 4 1\n4 5 1\n5 6 1\n6 7 1\n7 5 0\n8 6 0\n9 5 0\n10 3 0\n11 11 1\n12 12 1\n13 13 1\n14 14 1\n15 15 1\n16 16 1\n17 4 0\n18 3 0\n19 8 0\n20 9 0\n21 10 0\n22 2 0\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074630430_pandas_python.txt
Q: TypeError: auroc() missing 1 required positional argument: 'task' I was trying to fine tune BERT base uncased on a small dataset of 1.5k fields which is quiet less however while running trainer.fit(model, data_module) when it goes to the 'model' for training which is: class ElectionTagger(pl.LightningModule): def __init__(self, n_classes: int, n_training_steps=None, n_warmup_steps=None): super().__init__() self.bert = BertModel.from_pretrained(BERT_MODEL_NAME, return_dict=True) self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes) self.n_training_steps = n_training_steps self.n_warmup_steps = n_warmup_steps self.criterion = nn.BCELoss() def forward(self, input_ids, attention_mask, labels=None): output = self.bert(input_ids, attention_mask=attention_mask) output = self.classifier(output.pooler_output) output = torch.sigmoid(output) loss = 0 if labels is not None: loss = self.criterion(output, labels) return loss, output def training_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("train_loss", loss, prog_bar=True, logger=True) return {"loss": loss, "predictions": outputs, "labels": labels} def validation_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("val_loss", loss, prog_bar=True, logger=True) return loss def test_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("test_loss", loss, prog_bar=True, logger=True) return loss def training_epoch_end(self, outputs): labels = [] predictions = [] for output in outputs: for out_labels in output["labels"].detach().cpu(): labels.append(out_labels) for out_predictions in 
output["predictions"].detach().cpu(): predictions.append(out_predictions) labels = torch.stack(labels).int() predictions = torch.stack(predictions) for i, name in enumerate(LABEL_COLUMNS): class_roc_auc = auroc(predictions[:, i], labels[:, i]) ##### ERROR ARISES HERE### self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch) def configure_optimizers(self): optimizer = AdamW(self.parameters(), lr=2e-5) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps ) return dict( optimizer=optimizer, lr_scheduler=dict( scheduler=scheduler, interval='step' ) ) Instead of implementing, I got an error which says TypeError: auroc() missing 1 required positional argument: 'task' It would be great if anyone could provide a solution to this. A: The auroc function expects a task argument to be passed as the first argument, but it is not being passed in the call to auroc in the training_epoch_end method. To fix this error, you can pass the task argument to the auroc function like this: class ElectionTagger(pl.LightningModule): # ... def training_epoch_end(self, outputs): labels = [] predictions = [] for output in outputs: for out_labels in output["labels"].detach().cpu(): labels.append(out_labels) for out_predictions in output["predictions"].detach().cpu(): predictions.append(out_predictions) labels = torch.stack(labels).int() predictions = torch.stack(predictions) for i, name in enumerate(LABEL_COLUMNS): class_roc_auc = auroc(predictions[:, i], labels[:, i], task=name) self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch) # ... the auroc function is passed the task argument with the value of the name variable. This should fix the error you're encountering
TypeError: auroc() missing 1 required positional argument: 'task'
I was trying to fine tune BERT base uncased on a small dataset of 1.5k fields which is quiet less however while running trainer.fit(model, data_module) when it goes to the 'model' for training which is: class ElectionTagger(pl.LightningModule): def __init__(self, n_classes: int, n_training_steps=None, n_warmup_steps=None): super().__init__() self.bert = BertModel.from_pretrained(BERT_MODEL_NAME, return_dict=True) self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes) self.n_training_steps = n_training_steps self.n_warmup_steps = n_warmup_steps self.criterion = nn.BCELoss() def forward(self, input_ids, attention_mask, labels=None): output = self.bert(input_ids, attention_mask=attention_mask) output = self.classifier(output.pooler_output) output = torch.sigmoid(output) loss = 0 if labels is not None: loss = self.criterion(output, labels) return loss, output def training_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("train_loss", loss, prog_bar=True, logger=True) return {"loss": loss, "predictions": outputs, "labels": labels} def validation_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("val_loss", loss, prog_bar=True, logger=True) return loss def test_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("test_loss", loss, prog_bar=True, logger=True) return loss def training_epoch_end(self, outputs): labels = [] predictions = [] for output in outputs: for out_labels in output["labels"].detach().cpu(): labels.append(out_labels) for out_predictions in output["predictions"].detach().cpu(): predictions.append(out_predictions) labels = 
torch.stack(labels).int() predictions = torch.stack(predictions) for i, name in enumerate(LABEL_COLUMNS): class_roc_auc = auroc(predictions[:, i], labels[:, i]) ##### ERROR ARISES HERE### self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch) def configure_optimizers(self): optimizer = AdamW(self.parameters(), lr=2e-5) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps ) return dict( optimizer=optimizer, lr_scheduler=dict( scheduler=scheduler, interval='step' ) ) Instead of implementing, I got an error which says TypeError: auroc() missing 1 required positional argument: 'task' It would be great if anyone could provide a solution to this.
[ "The auroc function expects a task argument to be passed as the first argument, but it is not being passed in the call to auroc in the training_epoch_end method. To fix this error, you can pass the task argument to the auroc function like this:\nclass ElectionTagger(pl.LightningModule):\n # ...\n\n def training_epoch_end(self, outputs):\n labels = []\n predictions = []\n for output in outputs:\n for out_labels in output[\"labels\"].detach().cpu():\n labels.append(out_labels)\n for out_predictions in output[\"predictions\"].detach().cpu():\n predictions.append(out_predictions)\n labels = torch.stack(labels).int()\n predictions = torch.stack(predictions)\n for i, name in enumerate(LABEL_COLUMNS):\n class_roc_auc = auroc(predictions[:, i], labels[:, i], task=name)\n self.logger.experiment.add_scalar(f\"{name}_roc_auc/Train\", class_roc_auc, self.current_epoch)\n\n # ...\n\n\nthe auroc function is passed the task argument with the value of the name variable. This should fix the error you're encountering\n" ]
[ 0 ]
[]
[]
[ "bert_language_model", "machine_learning", "pytorch", "pytorch_lightning", "typeerror" ]
stackoverflow_0074673353_bert_language_model_machine_learning_pytorch_pytorch_lightning_typeerror.txt
Q: How to solve "invoke-rc.d: policy-rc.d denied execution of start." when building a docker container Ubuntu 20.04 and installing jenkins? I am trying to install jenkins on ubuntu:20.04 docker container and policy denied to start jenkins server. below error response when i'm trying to install jenkins. Created symlink /etc/systemd/system/multi-user.target.wants/jenkins.service → /lib/systemd/system/jenkins.service. invoke-rc.d: could not determine current runlevel invoke-rc.d: policy-rc.d denied execution of start. How to solve that issue? If it is an issue. ... invoke-rc.d: policy-rc.d denied execution of start. ... A: The policy-rc.d has one sole purpose,it should tell to invoke-rc.d if the action is allowed or not, by using its exit status. From debian docs Debian policy states that packages providing system services need to start those services by default, and that the starting of the service should be done by way of the /usr/sbin/invoke-rc.d script. This script will execute a program /usr/sbin/policy-rc.d if it exists, allowing the local system administrator to override behaviour if wanted by creating a policy script according to the interface specified and installing it as /usr/sbin/policy-rc.d. You should provide us with more information. Your Dockerfile. Are you running Docker inside Docker?
How to solve "invoke-rc.d: policy-rc.d denied execution of start." when building a docker container Ubuntu 20.04 and installing jenkins?
I am trying to install jenkins on ubuntu:20.04 docker container and policy denied to start jenkins server. below error response when i'm trying to install jenkins. Created symlink /etc/systemd/system/multi-user.target.wants/jenkins.service → /lib/systemd/system/jenkins.service. invoke-rc.d: could not determine current runlevel invoke-rc.d: policy-rc.d denied execution of start. How to solve that issue? If it is an issue. ... invoke-rc.d: policy-rc.d denied execution of start. ...
[ "The policy-rc.d has one sole purpose,it should tell to invoke-rc.d if the action is allowed or not, by using its exit status. From debian docs\n Debian policy states that packages providing system services need to start those services\n by default, and that the starting of the service should be done by way of the\n /usr/sbin/invoke-rc.d script. This script will execute a program /usr/sbin/policy-rc.d if\n it exists, allowing the local system administrator to override behaviour if wanted by\n creating a policy script according to the interface specified and installing it as\n /usr/sbin/policy-rc.d.\n\nYou should provide us with more information. Your Dockerfile. Are you running Docker inside Docker?\n" ]
[ 0 ]
[]
[]
[ "cicd", "docker", "jenkins", "ubuntu", "ubuntu_20.04" ]
stackoverflow_0074674292_cicd_docker_jenkins_ubuntu_ubuntu_20.04.txt
Q: Sending JSON to Flask, request.args vs request.form My understanding is that request.args in Flask contains the URL encoded parameters from a GET request while request.form contains POST data. What I'm having a hard time grasping is why when sending a POST request, trying to access the data with request.form returns a 400 error but when I try to access it with request.args it seems to work fine. I have tried sending the request with both Postman and curl and the results are identical. curl -X POST -d {"name":"Joe"} http://127.0.0.1:8080/testpoint --header "Content-Type:application/json" Code: @app.route('/testpoint', methods = ['POST']) def testpoint(): name = request.args.get('name', '') return jsonify(name = name) A: You are POST-ing JSON, neither request.args nor request.form will work. request.form works only if you POST data with the right content types; form data is either POSTed with the application/x-www-form-urlencoded or multipart/form-data encodings. When you use application/json, you are no longer POSTing form data. Use request.get_json() to access JSON POST data instead: @app.route('/testpoint', methods = ['POST']) def testpoint(): name = request.get_json().get('name', '') return jsonify(name = name) As you state, request.args only ever contains values included in the request query string, the optional part of a URL after the ? question mark. Since it’s part of the URL, it is independent from the POST request body. A: Your json data in curl is wrong, so Flask does not parse data to form. Send data like this: '{"name":"Joe"}' curl -X POST -d '{"name":"Joe"}' http://example.com:8080/testpoint --header "Content-Type:application/json" A: just change args for form and it will work @app.route('/testpoint', methods = ['POST']) def testpoint(): name = request.form.get('name', '')`enter code here` return jsonify(name = name)
Sending JSON to Flask, request.args vs request.form
My understanding is that request.args in Flask contains the URL encoded parameters from a GET request while request.form contains POST data. What I'm having a hard time grasping is why when sending a POST request, trying to access the data with request.form returns a 400 error but when I try to access it with request.args it seems to work fine. I have tried sending the request with both Postman and curl and the results are identical. curl -X POST -d {"name":"Joe"} http://127.0.0.1:8080/testpoint --header "Content-Type:application/json" Code: @app.route('/testpoint', methods = ['POST']) def testpoint(): name = request.args.get('name', '') return jsonify(name = name)
[ "You are POST-ing JSON, neither request.args nor request.form will work.\nrequest.form works only if you POST data with the right content types; form data is either POSTed with the application/x-www-form-urlencoded or multipart/form-data encodings.\nWhen you use application/json, you are no longer POSTing form data. Use request.get_json() to access JSON POST data instead:\[email protected]('/testpoint', methods = ['POST'])\ndef testpoint():\n name = request.get_json().get('name', '')\n return jsonify(name = name)\n\nAs you state, request.args only ever contains values included in the request query string, the optional part of a URL after the ? question mark. Since it’s part of the URL, it is independent from the POST request body.\n", "Your json data in curl is wrong, so Flask does not parse data to form.\nSend data like this: '{\"name\":\"Joe\"}'\ncurl -X POST -d '{\"name\":\"Joe\"}' http://example.com:8080/testpoint --header \"Content-Type:application/json\"\n\n", "just change args for form and it will work\[email protected]('/testpoint', methods = ['POST'])\ndef testpoint():\n name = request.form.get('name', '')`enter code here`\n return jsonify(name = name)\n\n" ]
[ 63, 3, 0 ]
[]
[]
[ "flask", "json", "post", "python", "rest" ]
stackoverflow_0023326368_flask_json_post_python_rest.txt
Q: I need to write react native code to return a single array of data from given data Here is my sample data const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] and here is the code am using to return the array for id 3. const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] const id = ['3'] const provider = data.reduce((prv, item) => { if(id.includes(item.id)){ return prv } return prv }); console.log('This is provider' ,provider); Unfortunately, the return am getting is data with an id of 1 Output: This is provider {"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"} can someone tell what am doing wrong please A: If your intention is to return only an object you can use reduce() but currently, you're doing it a little bit wrong way. 
You need to pass an initial value to the reduce() if it's not then the reduce picks the first element of the array as the initial value and starts iterating from the second element. so when condition(s) are false it returns the first element of the array. so you need to be careful with it. You need to return item here if(id.includes(item.id)) return item not prev. Here is the solution with .reduce() const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] const id = ['3']; const provider = data.reduce((prv, item) => { if(id.includes(item.id)){ return item } return prv }, {}); console.log(provider); Also You can use .filter() const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] const id = ['3']; console.log(data.filter(it => id.includes(it.id))); A: It looks like your code is not working because you're using the Array.reduce() method incorrectly. The reduce() method is used to reduce an array to a single value, not to filter out values from the array. 
To filter out the data with the id of 3, you can use the Array.filter() method instead. Here's how your code would look with the filter() method: const data = [ { "amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6" }, { "amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3" }, { "amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5" } ]; const id = ['3']; const provider = data.filter(item => id.includes(item.id)); console.log('This is provider', provider); This should output the following data: [ { "amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5" } ]
I need to write react native code to return a single array of data from given data
Here is my sample data const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] and here is the code am using to return the array for id 3. const data = [{"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"}, {"amount": "300,000", "cover": null, "id": "2", "img": "63723574a81ce.1.png", "make": "ferrari", "model": "ferrari", "name": "CIC", "policy": "Motor Insurance", "rate": "3"}, {"amount": "450,000", "cover": null, "id": "3", "img": "63723726cb1df.1.png", "make": "audi", "model": "audi", "name": "Mayfair Insurance", "policy": "Motor Insurance", "rate": "4.5"}] const id = ['3'] const provider = data.reduce((prv, item) => { if(id.includes(item.id)){ return prv } return prv }); console.log('This is provider' ,provider); Unfortunately, the return am getting is data with an id of 1 Output: This is provider {"amount": "600,000", "cover": null, "id": "1", "img": "636e56de36301.1.png", "make": "bmw", "model": "bmw", "name": "APA", "policy": "Motor Insurance", "rate": "6"} can someone tell what am doing wrong please
[ "If your intention is to return only an object you can use reduce() but currently, you're doing it a little bit wrong way.\n\nYou need to pass an initial value to the reduce() if it's not then the reduce picks the first element of the array as the initial value and starts iterating from the second element. so when condition(s) are false it returns the first element of the array. so you need to be careful with it.\nYou need to return item here if(id.includes(item.id)) return item not prev.\n\nHere is the solution with .reduce()\n\n\nconst data = [{\"amount\": \"600,000\", \"cover\": null, \"id\": \"1\", \"img\": \"636e56de36301.1.png\", \"make\": \"bmw\", \"model\": \"bmw\", \"name\": \"APA\", \"policy\": \"Motor Insurance\", \"rate\": \"6\"}, {\"amount\": \"300,000\", \"cover\": null, \"id\": \"2\", \"img\": \"63723574a81ce.1.png\", \"make\": \"ferrari\", \"model\": \"ferrari\", \"name\": \"CIC\", \"policy\": \"Motor Insurance\", \"rate\": \"3\"}, {\"amount\": \"450,000\", \"cover\": null, \"id\": \"3\", \"img\": \"63723726cb1df.1.png\", \"make\": \"audi\", \"model\": \"audi\", \"name\": \"Mayfair Insurance\", \"policy\": \"Motor Insurance\", \"rate\": \"4.5\"}]\n\nconst id = ['3'];\nconst provider = data.reduce((prv, item) => {\n if(id.includes(item.id)){\n return item \n }\nreturn prv\n}, {});\n\nconsole.log(provider);\n\n\n\nAlso You can use .filter()\n\n\nconst data = [{\"amount\": \"600,000\", \"cover\": null, \"id\": \"1\", \"img\": \"636e56de36301.1.png\", \"make\": \"bmw\", \"model\": \"bmw\", \"name\": \"APA\", \"policy\": \"Motor Insurance\", \"rate\": \"6\"}, {\"amount\": \"300,000\", \"cover\": null, \"id\": \"2\", \"img\": \"63723574a81ce.1.png\", \"make\": \"ferrari\", \"model\": \"ferrari\", \"name\": \"CIC\", \"policy\": \"Motor Insurance\", \"rate\": \"3\"}, {\"amount\": \"450,000\", \"cover\": null, \"id\": \"3\", \"img\": \"63723726cb1df.1.png\", \"make\": \"audi\", \"model\": \"audi\", \"name\": \"Mayfair Insurance\", \"policy\": \"Motor 
Insurance\", \"rate\": \"4.5\"}]\n\nconst id = ['3'];\nconsole.log(data.filter(it => id.includes(it.id)));\n\n\n\n", "It looks like your code is not working because you're using the Array.reduce() method incorrectly. The reduce() method is used to reduce an array to a single value, not to filter out values from the array.\nTo filter out the data with the id of 3, you can use the Array.filter() method instead. Here's how your code would look with the filter() method:\nconst data = [\n {\n \"amount\": \"600,000\",\n \"cover\": null,\n \"id\": \"1\",\n \"img\": \"636e56de36301.1.png\",\n \"make\": \"bmw\",\n \"model\": \"bmw\",\n \"name\": \"APA\",\n \"policy\": \"Motor Insurance\",\n \"rate\": \"6\"\n },\n {\n \"amount\": \"300,000\",\n \"cover\": null,\n \"id\": \"2\",\n \"img\": \"63723574a81ce.1.png\",\n \"make\": \"ferrari\",\n \"model\": \"ferrari\",\n \"name\": \"CIC\",\n \"policy\": \"Motor Insurance\",\n \"rate\": \"3\"\n },\n {\n \"amount\": \"450,000\",\n \"cover\": null,\n \"id\": \"3\",\n \"img\": \"63723726cb1df.1.png\",\n \"make\": \"audi\",\n \"model\": \"audi\",\n \"name\": \"Mayfair Insurance\",\n \"policy\": \"Motor Insurance\",\n \"rate\": \"4.5\"\n }\n];\n\nconst id = ['3'];\n\nconst provider = data.filter(item => id.includes(item.id));\nconsole.log('This is provider', provider);\n\nThis should output the following data:\n[\n {\n \"amount\": \"450,000\",\n \"cover\": null,\n \"id\": \"3\",\n \"img\": \"63723726cb1df.1.png\",\n \"make\": \"audi\",\n \"model\": \"audi\",\n \"name\": \"Mayfair Insurance\",\n \"policy\": \"Motor Insurance\",\n \"rate\": \"4.5\"\n }\n]\n\n" ]
[ 1, 0 ]
[]
[]
[ "javascript", "node.js", "react_native" ]
stackoverflow_0074665717_javascript_node.js_react_native.txt
Q: Rector how to avoid formatting source code? When running a simple Rector rule NormalizeNamespaceByPSR4ComposerAutoloadFileSystemRector the source code will be completely formatted, line breaks and spaces that are used to beautify the source code are removed. How can this be avoided in Rector? $services->set(NormalizeNamespaceByPSR4ComposerAutoloadFileSystemRector::class); ... is the only service config in rector.php. Rector says "! [NOTE] File ..." will be added:" So according to the rule it creates a new file (actually it is the same file...). I susspect this looks new for Rector because of the added namespace by the rule. A: When using Rector to apply a rule to your code, it will automatically format the code according to the rule. This includes removing any unnecessary whitespace and line breaks. If you want to avoid this, you can use the --dry-run option when running Rector. This will show you the changes that the rule will make to your code without actually applying those changes. You can then review the changes and decide whether or not you want to apply them. To use the --dry-run option, you would run the following command: php vendor/bin/rector process /path/to/your/code --dry-run This will show you a diff of the changes that Rector will make to your code without actually making those changes. You can then review the changes and decide whether or not you want to apply them. As for the issue of Rector creating a new file, it sounds like you may have misunderstood what the rule is doing. Rector does not create new files - it simply applies the rule to the files in the specified directory. In this case, the rule is probably adding a namespace declaration to the top of each file, which may be causing Rector to think that the file is new. However, this is just a side effect of the rule, and the original file will not be replaced or deleted.
Rector how to avoid formatting source code?
When running a simple Rector rule NormalizeNamespaceByPSR4ComposerAutoloadFileSystemRector the source code will be completely formatted, line breaks and spaces that are used to beautify the source code are removed. How can this be avoided in Rector? $services->set(NormalizeNamespaceByPSR4ComposerAutoloadFileSystemRector::class); ... is the only service config in rector.php. Rector says "! [NOTE] File ..." will be added:" So according to the rule it creates a new file (actually it is the same file...). I suspect this looks new for Rector because of the added namespace by the rule.
[ "When using Rector to apply a rule to your code, it will automatically format the code according to the rule. This includes removing any unnecessary whitespace and line breaks. If you want to avoid this, you can use the --dry-run option when running Rector. This will show you the changes that the rule will make to your code without actually applying those changes. You can then review the changes and decide whether or not you want to apply them.\nTo use the --dry-run option, you would run the following command:\nphp vendor/bin/rector process /path/to/your/code --dry-run\n\n\nThis will show you a diff of the changes that Rector will make to your code without actually making those changes. You can then review the changes and decide whether or not you want to apply them.\nAs for the issue of Rector creating a new file, it sounds like you may have misunderstood what the rule is doing. Rector does not create new files - it simply applies the rule to the files in the specified directory. In this case, the rule is probably adding a namespace declaration to the top of each file, which may be causing Rector to think that the file is new. However, this is just a side effect of the rule, and the original file will not be replaced or deleted.\n" ]
[ 1 ]
[]
[]
[ "php", "rector" ]
stackoverflow_0063372352_php_rector.txt
Q: how to type right } in lstinline how to type right } in lstinline. I cannot insert \lstinline{{}} The left{ is work well, but the right cause error. Extra }, or forgotten \endgroup. Thanks for your help. A: You can use some other character, which does not occur in your listing, to delimit the lstinline argument: \documentclass{article} \usepackage{listings} \begin{document} \lstinline|{}| \lstinline@{}@ \lstinline!{}! \lstinline*{}* \end{document}
how to type right } in lstinline
how to type right } in lstinline. I cannot insert \lstinline{{}} The left{ is work well, but the right cause error. Extra }, or forgotten \endgroup. Thanks for your help.
[ "You can use some other character, which does not occur in your listing, to delimit the lstinline argument:\n\\documentclass{article}\n\n\\usepackage{listings}\n\n\\begin{document}\n\n\\lstinline|{}|\n\n\\lstinline@{}@\n\n\\lstinline!{}!\n\n\\lstinline*{}*\n\n\\end{document}\n\n" ]
[ 0 ]
[]
[]
[ "latex", "lstlisting" ]
stackoverflow_0074673137_latex_lstlisting.txt
Q: What should be the correct code in order to get the factorial of n? n=int(input("Enter a number: ")) p=1 for i in range(n): p*=i print(p) I wanted to find out the factorial of a number but I always get 0 as output. A: The factorial of a number is the product of all the numbers from 1 to that number. However, in your code, you are starting the loop from 0 and then multiplying the product by the loop variable. This means that the product will always be 0 because any number multiplied by 0 is 0. You can change the starting value of the loop variable to 1 instead of 0. This way, the product will be initialized to 1 and then multiplied by the numbers from 1 to n, which is the correct way to calculate the factorial of a number. n = int(input("Enter a number: ")) p = 1 for i in range(1, n+1): p *= i print(p) You could also just use the math library which is built-in. import math n = int(input("Enter a number: ")) p = math.factorial(n) print(p) A: The code you provided does not return the correct result because the loop variable i is being used to calculate the factorial, but it is not initialized to the correct value. The i variable is initialized to 0 in the range() function, but the factorial of 0 is not defined. Instead, the loop variable should be initialized to 1 in order to correctly calculate the factorial. Here is an example of how you can modify the code to correctly calculate the factorial of a number: # Get the input number n = int(input("Enter a number: ")) # Initialize the result to 1 p = 1 # Loop over the numbers from 1 to n for i in range(1, n+1): # Multiply the result by the current number p *= i # Print the result print(p) In this example, the loop variable i is initialized to 1 in the range() function, which ensures that the factorial is calculated correctly. The loop variable is incremented by 1 each time the loop is executed, and the result is multiplied by the current value of the loop variable. 
This allows the code to correctly calculate the factorial of any number.
What should be the correct code in order to get the factorial of n?
n=int(input("Enter a number: ")) p=1 for i in range(n): p*=i print(p) I wanted to find out the factorial of a number but I always get 0 as output.
[ "The factorial of a number is the product of all the numbers from 1 to that number. However, in your code, you are starting the loop from 0 and then multiplying the product by the loop variable. This means that the product will always be 0 because any number multiplied by 0 is 0.\nYou can change the starting value of the loop variable to 1 instead of 0. This way, the product will be initialized to 1 and then multiplied by the numbers from 1 to n, which is the correct way to calculate the factorial of a number.\nn = int(input(\"Enter a number: \"))\np = 1\nfor i in range(1, n+1):\n p *= i\nprint(p)\n\nYou could also just use the math library which is built-in.\nimport math\n\nn = int(input(\"Enter a number: \"))\np = math.factorial(n)\nprint(p)\n\n", "The code you provided does not return the correct result because the loop variable i is being used to calculate the factorial, but it is not initialized to the correct value. The i variable is initialized to 0 in the range() function, but the factorial of 0 is not defined. Instead, the loop variable should be initialized to 1 in order to correctly calculate the factorial.\nHere is an example of how you can modify the code to correctly calculate the factorial of a number:\n# Get the input number\nn = int(input(\"Enter a number: \"))\n\n# Initialize the result to 1\np = 1\n\n# Loop over the numbers from 1 to n\nfor i in range(1, n+1):\n # Multiply the result by the current number\n p *= i\n\n# Print the result\nprint(p)\n\nIn this example, the loop variable i is initialized to 1 in the range() function, which ensures that the factorial is calculated correctly. The loop variable is incremented by 1 each time the loop is executed, and the result is multiplied by the current value of the loop variable. This allows the code to correctly calculate the factorial of any number.\n" ]
[ 0, 0 ]
[]
[]
[ "factorial", "numbers", "python" ]
stackoverflow_0074674629_factorial_numbers_python.txt
Q: How can I fix button bifurcation in PPT? I tried to create a simple programmable button editor view and this is what I see when I view the slide slide view Button bifurcates! How can this problem be solved? A: I tried trying to fix it by poking at everything I see and saw the monitor selection option. I have 3 screens, I chose the main monitor. This was the reason.
How can I fix button bifurcation in PPT?
I tried to create a simple programmable button editor view and this is what I see when I view the slide slide view Button bifurcates! How can this problem be solved?
[ "I tried trying to fix it by poking at everything I see and saw the monitor selection option. I have 3 screens, I chose the main monitor. This was the reason.\n" ]
[ 0 ]
[]
[]
[ "powerpoint", "vba" ]
stackoverflow_0074666732_powerpoint_vba.txt
Q: Do Mongoose query calls throw exceptions? I have several interesting questions. I am using mongoose and node.js Let's imagine I have a schema called Actor and all questions will be about it. 1) const saveActor = await Actor.save(); Does this throw an exception all the time or is it mandatory and precaution to check it like the following? : if(saveActor) return "success"; else return "error"; ? 2) const actor = Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions); If not found, it returns null. So, I must check if it returns null - Does it mean the document was not found? What about the update? What if update doesn't work - Does it throw an exception all the time or does it sometimes return null? 3) What about other mongoose functions? findByIdAndRemove? what is the sum up? Do they throw exceptions all the time or sometimes? I can't find this information in docs. A: 1: const saveActor = await Actor.save(). saveActor will contain the success return value. If error occurred, it'll throw and error which you've to catch; for async/await syntax it's done like: try { const saveActor = await Actor.save(); } catch (e) { console.error(e) } 2: const actor = Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions) If the query was successful: there was no matching entry, then it will return null. So, I must check if it returns null - Does it mean the document was not found? -> Yes if a match was found, it will return the successfull operation value (the document which was updated) If error occurred, throws an error. If you're using callback, error will be passed to the callback as param. If you're using thenables or Promise/async await then you'll have to catch. 3: Different method have different return type. For example (from docs): Mongoose.prototype.model() Returns: «Model» The model associated with name. Mongoose will create the model if it doesn't already exist. 
Model.find() Returns: «Query» Errors As for errors, in general, if the method accepts callback, error is a parameter like (err, doc) => { if (err) console.error(err) ... } If you're not passing callback, then it's usually thenables or Promise (more here). It's handled like Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions).then(data => console.log(data)).catch(err => console.error(err)) etc. Full mongoose API. Refer MDN for Promise and async/await A: Q1: await Actor.save() will throw an error Q2/Q3: You can use orFail() like so: await Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions).orFail(); Or: await Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions).orFail(new Error('Document not found'));
Do Mongoose query calls throw exceptions?
I have several interesting questions. I am using mongoose and node.js Let's imagine I have a schema called Actor and all questions will be about it. 1) const saveActor = await Actor.save(); Does this throw an exception all the time or is it mandatory and precaution to check it like the following? : if(saveActor) return "success"; else return "error"; ? 2) const actor = Actor.findByIdAndUpdate("5ca509acd0ddef4d1c1c892f", someotheroptions); If not found, it returns null. So, I must check if it returns null - Does it mean the document was not found? What about the update? What if update doesn't work - Does it throw an exception all the time or does it sometimes return null? 3) What about other mongoose functions? findByIdAndRemove? what is the sum up? Do they throw exceptions all the time or sometimes? I can't find this information in docs.
[ "1: const saveActor = await Actor.save().\nsaveActor will contain the success return value. If error occurred, it'll throw and error which you've to catch; for async/await syntax it's done like:\ntry {\n const saveActor = await Actor.save();\n} catch (e) {\n console.error(e)\n}\n\n2: const actor = Actor.findByIdAndUpdate(\"5ca509acd0ddef4d1c1c892f\", someotheroptions)\nIf the query was successful:\n\nthere was no matching entry, then it will return null. So, I must check if it returns null - Does it mean the document was not found? -> Yes\nif a match was found, it will return the successfull operation value (the document which was updated)\n\nIf error occurred, throws an error. If you're using callback, error will be passed to the callback as param. If you're using thenables or Promise/async await then you'll have to catch.\n3: Different method have different return type. For example (from docs):\nMongoose.prototype.model()\nReturns:\n\n «Model» The model associated with name. Mongoose will create the model if it doesn't already exist.\n\nModel.find()\nReturns:\n\n «Query» \n\nErrors\nAs for errors, in general, if the method accepts callback, error is a parameter like (err, doc) => { if (err) console.error(err) ... }\nIf you're not passing callback, then it's usually thenables or Promise (more here). It's handled like Actor.findByIdAndUpdate(\"5ca509acd0ddef4d1c1c892f\", someotheroptions).then(data => console.log(data)).catch(err => console.error(err)) etc.\nFull mongoose API. Refer MDN for Promise and async/await\n", "Q1: await Actor.save() will throw an error\nQ2/Q3: You can use orFail() like so:\nawait Actor.findByIdAndUpdate(\"5ca509acd0ddef4d1c1c892f\", someotheroptions).orFail();\n\nOr:\nawait Actor.findByIdAndUpdate(\"5ca509acd0ddef4d1c1c892f\", someotheroptions).orFail(new Error('Document not found'));\n\n" ]
[ 1, 1 ]
[]
[]
[ "mongodb", "mongoose", "node.js" ]
stackoverflow_0055504835_mongodb_mongoose_node.js.txt
Q: breakpoints in cuda do not work! with a very simple code, hello world, the breakpoint is not working. I can't write the exact comment since it's not written in English, but it's like 'the symbols of this document are not loaded' or something. there's not cuda codes, just only one line printf in main function. The working environment is windows7 64bit, vc++2008 sp1, cuda toolkit 3.1 64bits. Please give me some explanation on this. :) A: So this is just a host application (i.e. nothing to do with CUDA) doing printf that you can't debug? Have you selected "Debug" as the configuration instead of "Release"? A: Are you trying to use a Visual Studio breakpoint to stop in your CUDA device code (.cu)? If that is the case, then I'm pretty sure that you can't do that. NVIDIA has released Parallel NSIGHT, which should allow you to do debugging of CUDA device code (.cu), though I don't have much experience with it myself. A: Did you compile with -g -G options as noted in the documentation? NVCC, the NVIDIA CUDA compiler driver, provides a mechanism for generating the debugging information necessary for CUDA-GDB to work properly. The -g -G option pair must be passed to NVCC when an application is compiled for ease of debugging with CUDA-GDB; for example, nvcc -g -G foo.cu -o foo here: https://docs.nvidia.com/cuda/cuda-gdb/index.html
breakpoints in cuda do not work!
with a very simple code, hello world, the breakpoint is not working. I can't write the exact comment since it's not written in English, but it's like 'the symbols of this document are not loaded' or something. there's not cuda codes, just only one line printf in main function. The working environment is windows7 64bit, vc++2008 sp1, cuda toolkit 3.1 64bits. Please give me some explanation on this. :)
[ "So this is just a host application (i.e. nothing to do with CUDA) doing printf that you can't debug? Have you selected \"Debug\" as the configuration instead of \"Release\"?\n", "Are you trying to use a Visual Studio breakpoint to stop in your CUDA device code (.cu)? If that is the case, then I'm pretty sure that you can't do that. NVIDIA has released Parallel NSIGHT, which should allow you to do debugging of CUDA device code (.cu), though I don't have much experience with it myself.\n", "Did you compile with -g -G options as noted in the documentation?\nNVCC, the NVIDIA CUDA compiler driver, provides a mechanism for generating the debugging information necessary for CUDA-GDB to work properly. The -g -G option pair must be passed to NVCC when an application is compiled for ease of debugging with CUDA-GDB; for example,\n\nnvcc -g -G foo.cu -o foo\n\nhere: https://docs.nvidia.com/cuda/cuda-gdb/index.html\n" ]
[ 1, 1, 0 ]
[]
[]
[ "cuda" ]
stackoverflow_0003706888_cuda.txt
Q: Some Products in ebay api do not return pictureURLLarge for product listing using findItemsAdvanced I am new to ebay API. I am trying to call a service from ebay as shown below http://svcs.ebay.com/services/search/FindingService/v1? OPERATION-NAME=findItemsAdvanced& SERVICE-VERSION=1.12.0& SECURITY-APPNAME=XXXXXXXX& GLOBAL-ID=EBAY-IN&outputSelector=PictureURLLarge& RESPONSE-DATA-FORMAT=XML&REST-PAYLOAD I have already added the outputSelector=PictureURLLarge. But some products returned do not receive the pictureURLLarge The following is the output of one of the SimpleXMLElement I am getting, SimpleXMLElement Object ( ............... ............... [galleryURL] => http://thumbs3.ebaystatic.com/m/mul3eDNEoMQ0GzjE_L86YtA/80.jpg [viewItemURL] => http://www.ebay.in/itm/Nike-Golf-Sport-Shoe-Tote-/290867798430?pt=LH_DefaultDomain_203 [paymentMethod] => PaisaPayEscrow [autoPay] => false [location] => India [country] => IN [shippingInfo] => SimpleXMLElement Object ( [shippingServiceCost] => 0.0 [shippingType] => Free [shipToLocations] => IN ) [sellingStatus] => SimpleXMLElement Object ( [currentPrice] => 3540.0 [convertedCurrentPrice] => 3540.0 [sellingState] => Active [timeLeft] => P26DT3H27M13S ) [listingInfo] => SimpleXMLElement Object ( [bestOfferEnabled] => false [buyItNowAvailable] => false [startTime] => 2013-02-21T18:09:53.000Z [endTime] => 2013-09-19T18:14:53.000Z [listingType] => StoreInventory [gift] => false ) [condition] => SimpleXMLElement Object ( [conditionId] => 1000 [conditionDisplayName] => New ) [isMultiVariationListing] => false [topRatedListing] => false ) If we check the link where the product is, that is viewItemURL here, we are been displayed a Large Image, but here we are not getting the same in PictureURLLarge. Is there any other way to do this? Any help would be appreciable. Note : I do not want to use the GetSingleItem, that would result in lots of api calls. Thanks in advance. 
A: The pictureURLLarge field is only included in the response if the seller has provided a large image for the item in their listing. If the field is not included in the response, it means that the seller has not provided a large image for the item. In this case, you can use the galleryURL field to display a smaller version of the item's image in your application.
Some Products in ebay api do not return pictureURLLarge for product listing using findItemsAdvanced
I am new to ebay API. I am trying to call a service from ebay as shown below http://svcs.ebay.com/services/search/FindingService/v1? OPERATION-NAME=findItemsAdvanced& SERVICE-VERSION=1.12.0& SECURITY-APPNAME=XXXXXXXX& GLOBAL-ID=EBAY-IN&outputSelector=PictureURLLarge& RESPONSE-DATA-FORMAT=XML&REST-PAYLOAD I have already added the outputSelector=PictureURLLarge. But some products returned do not receive the pictureURLLarge The following is the output of one of the SimpleXMLElement I am getting, SimpleXMLElement Object ( ............... ............... [galleryURL] => http://thumbs3.ebaystatic.com/m/mul3eDNEoMQ0GzjE_L86YtA/80.jpg [viewItemURL] => http://www.ebay.in/itm/Nike-Golf-Sport-Shoe-Tote-/290867798430?pt=LH_DefaultDomain_203 [paymentMethod] => PaisaPayEscrow [autoPay] => false [location] => India [country] => IN [shippingInfo] => SimpleXMLElement Object ( [shippingServiceCost] => 0.0 [shippingType] => Free [shipToLocations] => IN ) [sellingStatus] => SimpleXMLElement Object ( [currentPrice] => 3540.0 [convertedCurrentPrice] => 3540.0 [sellingState] => Active [timeLeft] => P26DT3H27M13S ) [listingInfo] => SimpleXMLElement Object ( [bestOfferEnabled] => false [buyItNowAvailable] => false [startTime] => 2013-02-21T18:09:53.000Z [endTime] => 2013-09-19T18:14:53.000Z [listingType] => StoreInventory [gift] => false ) [condition] => SimpleXMLElement Object ( [conditionId] => 1000 [conditionDisplayName] => New ) [isMultiVariationListing] => false [topRatedListing] => false ) If we check the link where the product is, that is viewItemURL here, we are been displayed a Large Image, but here we are not getting the same in PictureURLLarge. Is there any other way to do this? Any help would be appreciable. Note : I do not want to use the GetSingleItem, that would result in lots of api calls. Thanks in advance.
[ "The pictureURLLarge field is only included in the response if the seller has provided a large image for the item in their listing. If the field is not included in the response, it means that the seller has not provided a large image for the item. In this case, you can use the galleryURL field to display a smaller version of the item's image in your application.\n" ]
[ 0 ]
[]
[]
[ "ebay_api", "php" ]
stackoverflow_0018419865_ebay_api_php.txt
Q: Surfaces with different colormaps How can multiple surfaces be plotted on the axes but surfaces uses a different colormap?. Using colormap("...") changes it for the entire figure, not just a single surface. Thanks A: Do You mean on same axes? I haven't found a function that does this directly. But it is possible to pass the desired colors in the surf function. Way I found: Convert the data to a 0-1 scale and then convert to the desired colormap. Example with hot and jet colormaps: tx = ty = linspace (-8, 8, 41)'; [xx, yy] = meshgrid (tx, ty); r = sqrt (xx .^ 2 + yy .^ 2) + eps; tz = sin (r) ./ r ; function normalized = normalize_01(data) data_min = min(min(data)) data_max = max(max(data)) normalized = (data - data_min)/(data_max - data_min) endfunction function rgb = data2rgb(data, color_bits, cmap) grays = normalize_01(data) indexes = gray2ind(grays, color_bits) rgb = ind2rgb(indexes, cmap) endfunction color_bits = 128 cmap_1 = hot(color_bits) rgb_1 = data2rgb(tz, color_bits, cmap_1) surf(tx, ty, tz, rgb_1) hold on cmap_2 = jet(color_bits) rgb_2 = data2rgb(tz+3, color_bits, cmap_2) surf(tx, ty, tz+3, rgb_2) But if you also need a colorbar, this way might not be useful. Unless you find a way to manually add two colorbar like I did with the cmap.
Surfaces with different colormaps
How can multiple surfaces be plotted on the axes but surfaces uses a different colormap?. Using colormap("...") changes it for the entire figure, not just a single surface. Thanks
[ "Do You mean on same axes?\nI haven't found a function that does this directly. But it is possible to pass the desired colors in the surf function.\nWay I found:\nConvert the data to a 0-1 scale and then convert to the desired colormap.\nExample with hot and jet colormaps:\ntx = ty = linspace (-8, 8, 41)';\n[xx, yy] = meshgrid (tx, ty);\nr = sqrt (xx .^ 2 + yy .^ 2) + eps;\ntz = sin (r) ./ r ;\n\nfunction normalized = normalize_01(data)\n data_min = min(min(data))\n data_max = max(max(data))\n normalized = (data - data_min)/(data_max - data_min)\nendfunction\n\nfunction rgb = data2rgb(data, color_bits, cmap)\n grays = normalize_01(data)\n indexes = gray2ind(grays, color_bits)\n rgb = ind2rgb(indexes, cmap)\nendfunction\n\ncolor_bits = 128\n\ncmap_1 = hot(color_bits)\nrgb_1 = data2rgb(tz, color_bits, cmap_1)\nsurf(tx, ty, tz, rgb_1)\nhold on\n\ncmap_2 = jet(color_bits)\nrgb_2 = data2rgb(tz+3, color_bits, cmap_2)\nsurf(tx, ty, tz+3, rgb_2)\n\n\nBut if you also need a colorbar, this way might not be useful. Unless you find a way to manually add two colorbar like I did with the cmap.\n" ]
[ 1 ]
[]
[]
[ "colormap", "octave", "surface" ]
stackoverflow_0074642517_colormap_octave_surface.txt
Q: Private Constructor and abstract classes in java https://stackoverflow.com/a/7486111/17273668 ; from what I have seen here to make a class "static" , we have to make it final with private constructor and static fields and methods. Is there any difference between making the constructor private and making the class abstract? A: There is a huge difference between making a constructor private, or making a class abstract. Making a constructor private means that the constructor can only be invoked within the class itself or its nested classes, which means it cannot be called from outside. Making a class abstract means that it can only be instantiated by subclassing it. If a class is abstract, but has a non-private constructor (the default constructor is public), it means it can be subclassed by classes in other compilation units. When it comes to utility classes, making the class final with a private constructor is - in my opinion - the better choice. The alternative is to make it abstract with a private constructor, but I think that is abusing the term abstract. Making something abstract raises an expectation of having concrete subclasses, which is not what you do with a utility class. A: An abstract class can be extended by sub classes, a private constructor (if it is the only constructor) prevents sub-classing (exception: nested classes). The only way to instantiate a class with private constructor is by implementing a static factory method in the class itself (e.g. Optional.of).
Private Constructor and abstract classes in java
https://stackoverflow.com/a/7486111/17273668 ; from what I have seen here to make a class "static" , we have to make it final with private constructor and static fields and methods. Is there any difference between making the constructor private and making the class abstract?
[ "There is a huge difference between making a constructor private, or making a class abstract. Making a constructor private means that the constructor can only be invoked within the class itself or its nested classes, which means it cannot be called from outside. Making a class abstract means that it can only be instantiated by subclassing it. If a class is abstract, but has a non-private constructor (the default constructor is public), it means it can be subclassed by classes in other compilation units.\nWhen it comes to utility classes, making the class final with a private constructor is - in my opinion - the better choice. The alternative is to make it abstract with a private constructor, but I think that is abusing the term abstract. Making something abstract raises an expectation of having concrete subclasses, which is not what you do with a utility class.\n", "An abstract class can be extended by sub classes, a private constructor (if it is the only constructor) prevents sub-classing (exception: nested classes). The only way to instantiate a class with private constructor is by implementing a static factory method in the class itself (e.g. Optional.of).\n" ]
[ 3, 2 ]
[]
[]
[ "constructor", "java", "static_methods" ]
stackoverflow_0074674601_constructor_java_static_methods.txt
Q: how to find document by the populate field in mongoose My Product Schema Look like this. import mongoose from 'mongoose'; const productSchema = new mongoose.Schema( { name: { type: String, required: true }, game: { type: mongoose.Schema.Types.ObjectId, ref: 'Game', required: true, }, category: { type: mongoose.Schema.Types.ObjectId, ref: 'Category', required: true, }, slug: { type: String, required: true, unique: true }, image: { type: String, required: true }, price: { type: Number, required: true }, nominal: { type: Number, required: true }, description: { type: String, required: true }, }, { timestamps: true, } ); const Product = mongoose.models.Product || mongoose.model('Product', productSchema); export default Product; My schema game import mongoose from 'mongoose'; const gameSchema = new mongoose.Schema( { name: { type: String, require: [true, 'Type cant be empty'], }, status: { type: String, enum: ['Y', 'N'], default: 'Y', }, thumbnail: { type: String, require: [true, 'Type cant be empty'], }, }, { timestamps: true } ); const Game = mongoose.models.Game || mongoose.model('Game', gameSchema); export default Game; I want to find a product by the game status is 'Y' I try to do like this const getHandler = async (req: NextApiRequest, res: NextApiResponse) => { await db.connect(); const options = { status: { $regex: 'Y', $options: 'i' } }; const products = await Product.find({}).populate({ path: 'game', select: 'status', match: options, }); res.send(products); await db.disconnect(); }; but is not work is not filtering. the output is still the same but for the products with a game status is 'N' it shows null I heard that we could use aggregation with $lookup but I still don't know how to that A: This should work for you. 
let data = await Product.aggregate([ { $lookup: { from: "Game", //Your schema name localField: "game", //field name of product which contains game id foreignField: "_id", // _id of game pipeline: [ { $match: { status: "Y", }, }, ], as: "game", //name of result }, }, { $unwind: "$game" },// this will make your array to object and also it will remove all null entry. ]); console.log(data); A: try this way : const products = await Product.find({}).populate({ path: 'game', model:'Game', match: {'status':'Y'} select: 'status' });
how to find document by the populate field in mongoose
My Product Schema Look like this. import mongoose from 'mongoose'; const productSchema = new mongoose.Schema( { name: { type: String, required: true }, game: { type: mongoose.Schema.Types.ObjectId, ref: 'Game', required: true, }, category: { type: mongoose.Schema.Types.ObjectId, ref: 'Category', required: true, }, slug: { type: String, required: true, unique: true }, image: { type: String, required: true }, price: { type: Number, required: true }, nominal: { type: Number, required: true }, description: { type: String, required: true }, }, { timestamps: true, } ); const Product = mongoose.models.Product || mongoose.model('Product', productSchema); export default Product; My schema game import mongoose from 'mongoose'; const gameSchema = new mongoose.Schema( { name: { type: String, require: [true, 'Type cant be empty'], }, status: { type: String, enum: ['Y', 'N'], default: 'Y', }, thumbnail: { type: String, require: [true, 'Type cant be empty'], }, }, { timestamps: true } ); const Game = mongoose.models.Game || mongoose.model('Game', gameSchema); export default Game; I want to find a product by the game status is 'Y' I try to do like this const getHandler = async (req: NextApiRequest, res: NextApiResponse) => { await db.connect(); const options = { status: { $regex: 'Y', $options: 'i' } }; const products = await Product.find({}).populate({ path: 'game', select: 'status', match: options, }); res.send(products); await db.disconnect(); }; but is not work is not filtering. the output is still the same but for the products with a game status is 'N' it shows null I heard that we could use aggregation with $lookup but I still don't know how to that
[ "This should work for you.\nlet data = await Product.aggregate([\n{\n $lookup: {\n from: \"Game\", //Your schema name\n localField: \"game\", //field name of product which contains game id\n foreignField: \"_id\", // _id of game\n pipeline: [\n {\n $match: {\n status: \"Y\",\n },\n },\n ],\n as: \"game\", //name of result\n },\n },\n { $unwind: \"$game\" },// this will make your array to object and also it will remove all null entry.\n]);\nconsole.log(data);\n\n", "try this way :\n const products = await Product.find({}).populate({\n path: 'game',\n model:'Game',\n match: {'status':'Y'}\n select: 'status'\n });\n\n" ]
[ 1, 0 ]
[]
[]
[ "mongodb", "mongoose", "next.js", "node.js" ]
stackoverflow_0074672989_mongodb_mongoose_next.js_node.js.txt
Q: How to convert dataframe to nested dictionary with specific array and list? How can I use a dataframe to create a nested dictionary, with interleaved lists and columns, as in the example below? Create dictionary: columns = ["name","reason","cgc","limit","email","address","message","type","value"] data = [("Paulo", "La Fava","123456","0","[email protected]","avenue A","msg txt 1","string","low"), ("Pedro", "Petrus","123457","20.00","[email protected]","avenue A","msg txt 2","string", "average"), ("Saulo", "Salix","123458","150.00","[email protected]","avenue B","msg txt 3","string","high")] df = spark.createDataFrame(data).toDF(*columns) df.show() expected outcome { "accepted": [ { "issuer": { "name": "Paulo", "reason": "La Fava", "cgc": "123456" }, "Recipient": { "limit": "0", "email": "[email protected]", "address": "avenue A" }, "additional_fields": [ { "message": "msg txt 1", "type": "string", "value": "low" } ] } ] } A: Arrays in Spark are homogeneous i.e. the elements should have same data type. In your sample expected output, the array type of "additional_fields" does not match with other two map fields "issuer" & "recipient". 
You have two ways to resolve this: If you can relax "additional_fields" to be just the map (not array) like "issuer" & "recipient", then you can use following transformation: df = df.withColumn("issuer", F.create_map(F.lit("name"), F.col("name"), \ F.lit("reason"), F.col("reason"), \ F.lit("cgc"), F.col("cgc"), \ ) ) \ .withColumn("recipient", F.create_map(F.lit("limit"), F.col("limit"), \ F.lit("email"), F.col("email"), \ F.lit("address"), F.col("address"), \ ) ) \ .withColumn("additional_fields", F.create_map(F.lit("message"), F.col("message"), \ F.lit("type"), F.col("type"), \ F.lit("value"), F.col("value"), \ ) ) \ .withColumn("accepted", F.array(F.create_map(F.lit("issuer"), F.col("issuer"), \ F.lit("recipient"), F.col("recipient"), \ F.lit("additional_fields"), F.col("additional_fields"), \ )) ) \ .drop(*[c for c in df.columns if c != "accepted"] + ["issuer", "recipient", "additional_fields"]) or, if you want to make "issuer" & "recipient" field types similar to "additional_fields" then use: df = df.withColumn("issuer", F.array([F.create_map(F.lit(c), F.col(c)) for c in ["name", "reason", "cgc"]])) \ .withColumn("recipient", F.array([F.create_map(F.lit(c), F.col(c)) for c in ["limit", "email", "address"]])) \ .withColumn("additional_fields", F.array([F.create_map(F.lit(c), F.col(c)) for c in ["message", "type", "value"]])) \ .withColumn("accepted", F.array([F.create_map(F.lit(c), F.col(c)) for c in ["issuer", "recipient", "additional_fields"]])) \ .drop(*[c for c in df.columns if c != "accepted"] + ["issuer", "recipient", "additional_fields"])
How to convert dataframe to nested dictionary with specific array and list?
How can I use a dataframe to create a nested dictionary, with interleaved lists and columns, as in the example below? Create dictionary: columns = ["name","reason","cgc","limit","email","address","message","type","value"] data = [("Paulo", "La Fava","123456","0","[email protected]","avenue A","msg txt 1","string","low"), ("Pedro", "Petrus","123457","20.00","[email protected]","avenue A","msg txt 2","string", "average"), ("Saulo", "Salix","123458","150.00","[email protected]","avenue B","msg txt 3","string","high")] df = spark.createDataFrame(data).toDF(*columns) df.show() expected outcome { "accepted": [ { "issuer": { "name": "Paulo", "reason": "La Fava", "cgc": "123456" }, "Recipient": { "limit": "0", "email": "[email protected]", "address": "avenue A" }, "additional_fields": [ { "message": "msg txt 1", "type": "string", "value": "low" } ] } ] }
[ "Arrays in Spark are homogeneous i.e. the elements should have same data type. In your sample expected output, the array type of \"additional_fields\" does not match with other two map fields \"issuer\" & \"recipient\".\nYou have two ways to resolve this:\nIf you can relax \"additional_fields\" to be just the map (not array) like \"issuer\" & \"recipient\", then you can use following transformation:\ndf = df.withColumn(\"issuer\", F.create_map(F.lit(\"name\"), F.col(\"name\"), \\\n F.lit(\"reason\"), F.col(\"reason\"), \\\n F.lit(\"cgc\"), F.col(\"cgc\"), \\\n )\n ) \\\n .withColumn(\"recipient\", F.create_map(F.lit(\"limit\"), F.col(\"limit\"), \\\n F.lit(\"email\"), F.col(\"email\"), \\\n F.lit(\"address\"), F.col(\"address\"), \\\n )\n ) \\\n .withColumn(\"additional_fields\", F.create_map(F.lit(\"message\"), F.col(\"message\"), \\\n F.lit(\"type\"), F.col(\"type\"), \\\n F.lit(\"value\"), F.col(\"value\"), \\\n )\n ) \\\n .withColumn(\"accepted\", F.array(F.create_map(F.lit(\"issuer\"), F.col(\"issuer\"), \\\n F.lit(\"recipient\"), F.col(\"recipient\"), \\\n F.lit(\"additional_fields\"), F.col(\"additional_fields\"), \\\n ))\n ) \\\n .drop(*[c for c in df.columns if c != \"accepted\"] + [\"issuer\", \"recipient\", \"additional_fields\"])\n\nor, if you want to make \"issuer\" & \"recipient\" field types similar to \"additional_fields\" then use:\ndf = df.withColumn(\"issuer\", F.array([F.create_map(F.lit(c), F.col(c)) for c in [\"name\", \"reason\", \"cgc\"]])) \\\n .withColumn(\"recipient\", F.array([F.create_map(F.lit(c), F.col(c)) for c in [\"limit\", \"email\", \"address\"]])) \\\n .withColumn(\"additional_fields\", F.array([F.create_map(F.lit(c), F.col(c)) for c in [\"message\", \"type\", \"value\"]])) \\\n .withColumn(\"accepted\", F.array([F.create_map(F.lit(c), F.col(c)) for c in [\"issuer\", \"recipient\", \"additional_fields\"]])) \\\n .drop(*[c for c in df.columns if c != \"accepted\"] + [\"issuer\", \"recipient\", \"additional_fields\"])\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "pyspark", "python" ]
stackoverflow_0074669493_pandas_pyspark_python.txt
Q: Read Xlsx file from a string I have an Xlsx file content in a string, loaded from the database. Currently, I create the spreadsheet using a temp file: file_put_contents($fileName, $bundle['package_blank']); $wobook = \PhpOffice\PhpSpreadsheet\IOFactory::load($fileName); unlink($fileName); Any way to read it directly from memory without having to write it in a temporary file? A: You can use the loadFromString method provided by the \PhpOffice\PhpSpreadsheet\IOFactory class to load an XLSX file from a string, rather than from a file. Here's an example: $xlsxString = $bundle['package_blank']; $wobook = \PhpOffice\PhpSpreadsheet\IOFactory::loadFromString($xlsxString);
Read Xlsx file from a string
I have an Xlsx file content in a string, loaded from the database. Currently, I create the spreadsheet using a temp file: file_put_contents($fileName, $bundle['package_blank']); $wobook = \PhpOffice\PhpSpreadsheet\IOFactory::load($fileName); unlink($fileName); Any way to read it directly from memory without having to write it in a temporary file?
[ "You can use the loadFromString method provided by the \\PhpOffice\\PhpSpreadsheet\\IOFactory class to load an XLSX file from a string, rather than from a file. Here's an example:\n$xlsxString = $bundle['package_blank'];\n\n$wobook = \\PhpOffice\\PhpSpreadsheet\\IOFactory::loadFromString($xlsxString);\n\n" ]
[ 0 ]
[]
[]
[ "php", "phpspreadsheet" ]
stackoverflow_0048831225_php_phpspreadsheet.txt
Q: Can you help me to find out this question (Top 10 movie making country's video length mean) from netfix dataset df_movies = df[df['type'] == 'Movie'] #top 10 movie making country df_movies['country'].value_counts().index[:10] #movie duration mean for each country df_movies.groupby(['minute']).mean() i'm learning python in jupyter notebook. i cant sort ""Top 10 movie making country's video length mean"" . can anyone help me to do this. Top 10 movie making country's video length mean. A: Assuming that you have the same dataset that I used, this is the way I would do it: Code: import pandas as pd df = pd.read_csv('https://raw.githubusercontent.com/nataliafonseca/netflix-data-analysis/main/datasets/netflix_data.csv') # Select only the movies df_movies = df[df['type'] == 'Movie'] # top 10 movie making countries top_10_countries = list(df_movies['country'].value_counts().index[:10]) print("Top10 Countries:", top_10_countries) # movie duration mean for each country avg_duration = df_movies[df_movies.country.isin(top_10_countries)].groupby('country')['duration']\ .mean().reset_index()\ .sort_values(by=['duration']) print(avg_duration) Output: Top10 Countries: ['United States', 'India', 'United Kingdom', 'Canada', 'France', 'Spain', 'Egypt', 'Mexico', 'Turkey', 'Japan'] country duration 0 Canada 86.748571 5 Mexico 88.556962 9 United States 90.689048 8 United Kingdom 94.152493 4 Japan 95.360000 2 France 96.408759 6 Spain 102.016807 7 Turkey 107.538462 1 Egypt 109.548387 3 India 126.922990
Can you help me to find out this question (Top 10 movie making country's video length mean) from netfix dataset
df_movies = df[df['type'] == 'Movie'] #top 10 movie making country df_movies['country'].value_counts().index[:10] #movie duration mean for each country df_movies.groupby(['minute']).mean() i'm learning python in jupyter notebook. i cant sort ""Top 10 movie making country's video length mean"" . can anyone help me to do this. Top 10 movie making country's video length mean.
[ "Assuming that you have the same dataset that I used, this is the way I would do it:\nCode:\nimport pandas as pd\n\ndf = pd.read_csv('https://raw.githubusercontent.com/nataliafonseca/netflix-data-analysis/main/datasets/netflix_data.csv')\n\n# Select only the movies\ndf_movies = df[df['type'] == 'Movie']\n\n# top 10 movie making countries\ntop_10_countries = list(df_movies['country'].value_counts().index[:10])\nprint(\"Top10 Countries:\", top_10_countries)\n\n\n# movie duration mean for each country\navg_duration = df_movies[df_movies.country.isin(top_10_countries)].groupby('country')['duration']\\\n .mean().reset_index()\\\n .sort_values(by=['duration'])\nprint(avg_duration)\n\nOutput:\nTop10 Countries: ['United States', 'India', 'United Kingdom', 'Canada', 'France', 'Spain', 'Egypt', 'Mexico', 'Turkey', 'Japan']\n\n country duration\n0 Canada 86.748571\n5 Mexico 88.556962\n9 United States 90.689048\n8 United Kingdom 94.152493\n4 Japan 95.360000\n2 France 96.408759\n6 Spain 102.016807\n7 Turkey 107.538462\n1 Egypt 109.548387\n3 India 126.922990\n\n" ]
[ 0 ]
[]
[]
[ "data_analysis", "data_science", "dataset", "netflix", "python_2.7" ]
stackoverflow_0074674053_data_analysis_data_science_dataset_netflix_python_2.7.txt
Q: Automate a click to make a collapse link change from less info to more info I an trying use automation to click on a "More Details" link on a popup to show more info but the link doesn't have any id associated with it. When I inspect the link in Chrome console the it appears to be the "href="#myCollapse" line below in the associated element. I dont know js at all really but have tried a number of things along the lines of document.getElementById('#myCollapse').click();... and tried to use child of id="popup-content". <div id="popup-content"> <span id="overlayAddress">SCOTTISH PARLIAMENT 1 HORSE WYND OLD TOWN </span><br> <span id="overlayTown">EDINBURGH</span><br> <span id="overlayPostCode">EH99 1SP</span> <br> <a href="#myCollapse" data-bs-toggle="collapse" class="collapsed" aria-expanded="false">More Details</a> <div class="collapse" id="myCollapse" style=""> <label style="color:white;"><b>UPRN :&nbsp;</b></label><span id="overlayuprn">906423108</span><br> <label style="color:white;"><b>POSTAL TOWN :&nbsp;</b></label><span id="overlayPostTown">EDINBURGH</span><br> <label style="color:white;"><b>CUSTODIAN :&nbsp;</b></label><span id="overlayCustodian">City of Edinburgh</span><br> </div> </div> A: Just use the following selector: document.querySelector('#popup-content a[href="#myCollapse"]').click()
Automate a click to make a collapse link change from less info to more info
I an trying use automation to click on a "More Details" link on a popup to show more info but the link doesn't have any id associated with it. When I inspect the link in Chrome console the it appears to be the "href="#myCollapse" line below in the associated element. I dont know js at all really but have tried a number of things along the lines of document.getElementById('#myCollapse').click();... and tried to use child of id="popup-content". <div id="popup-content"> <span id="overlayAddress">SCOTTISH PARLIAMENT 1 HORSE WYND OLD TOWN </span><br> <span id="overlayTown">EDINBURGH</span><br> <span id="overlayPostCode">EH99 1SP</span> <br> <a href="#myCollapse" data-bs-toggle="collapse" class="collapsed" aria-expanded="false">More Details</a> <div class="collapse" id="myCollapse" style=""> <label style="color:white;"><b>UPRN :&nbsp;</b></label><span id="overlayuprn">906423108</span><br> <label style="color:white;"><b>POSTAL TOWN :&nbsp;</b></label><span id="overlayPostTown">EDINBURGH</span><br> <label style="color:white;"><b>CUSTODIAN :&nbsp;</b></label><span id="overlayCustodian">City of Edinburgh</span><br> </div> </div>
[ "Just use the following selector:\ndocument.querySelector('#popup-content a[href=\"#myCollapse\"]').click()\n\n" ]
[ 1 ]
[]
[]
[ "html", "javascript" ]
stackoverflow_0074674640_html_javascript.txt
Q: Teradata SQL code to find count between eff start and end date I have a dataset that has 5 columns. Each account can have multiple rows. I need to group the data by C2 and Monthyear and find counts ACC_ID, C1 , C2, EFF_START_DATE, EFF_END_DATE 111 , 0 , A , 2018-01-01, 2499-12-31 222 , 0 , A , 2018-02-15 , 2018-03-15 222 , 0 , B , 2018-03-16, 2499-12-31 333 , 0, A, 2000-01-01, 2499-12-31 I need to group this by months and find count for each month. So if someone has 2018-01-01 as EFF_STA_DTE and 2499-12-31 as EFF_END_DATE. They should be a part of all the months starting 2018. Similarly if someone has 2018-02-15 as EFF_STA_DTE and 2018-03-15 as EFF_END_DATE their count should only reflect for Feb and March 2018. Also I am only trying to get a count starting 2018 even if eff_start_Date is in past. So 333 in above case will have count 1 in 2018 and henceforth Tried to extract Month year and do the count based on eff_start_Date but that is giving incorrect result. Expected Output in above case MONTH, C2, COUNT JAN-18, A, 2. -- FOR ACCOUNT 111 ,333 FEB-18, A , 3. -- FOR ACCOUNT 111,222,333 MARCH-18, A, 1 -- FOR ACCOUNT 111,222,333 MARCH-18, B, 1. -- FOR ACCOUNT 222 A: The most efficient way utilizes Teradata's EXPAND ON extension to Standard SQL: WITH cte AS ( SELECT -- first of month Trunc(BEGIN(pd), 'mon') AS mon ,C2 FROM tab -- create a period on-the-fly, adjust the end date as periods exclude the end EXPAND ON PERIOD(EFF_START_DATE, Next(EFF_END_DATE)) AS pd -- return one row per month BY ANCHOR PERIOD MONTH_BEGIN -- restrict output to a specifc range FOR PERIOD (date '2018-01-01', date '2018-03-31') ) SELECT mon, C2, Count(*) FROM cte GROUP BY 1,2 ORDER BY 1,2 ;
Teradata SQL code to find count between eff start and end date
I have a dataset that has 5 columns. Each account can have multiple rows. I need to group the data by C2 and Monthyear and find counts ACC_ID, C1 , C2, EFF_START_DATE, EFF_END_DATE 111 , 0 , A , 2018-01-01, 2499-12-31 222 , 0 , A , 2018-02-15 , 2018-03-15 222 , 0 , B , 2018-03-16, 2499-12-31 333 , 0, A, 2000-01-01, 2499-12-31 I need to group this by months and find count for each month. So if someone has 2018-01-01 as EFF_STA_DTE and 2499-12-31 as EFF_END_DATE. They should be a part of all the months starting 2018. Similarly if someone has 2018-02-15 as EFF_STA_DTE and 2018-03-15 as EFF_END_DATE their count should only reflect for Feb and March 2018. Also I am only trying to get a count starting 2018 even if eff_start_Date is in past. So 333 in above case will have count 1 in 2018 and henceforth Tried to extract Month year and do the count based on eff_start_Date but that is giving incorrect result. Expected Output in above case MONTH, C2, COUNT JAN-18, A, 2. -- FOR ACCOUNT 111 ,333 FEB-18, A , 3. -- FOR ACCOUNT 111,222,333 MARCH-18, A, 1 -- FOR ACCOUNT 111,222,333 MARCH-18, B, 1. -- FOR ACCOUNT 222
[ "The most efficient way utilizes Teradata's EXPAND ON extension to Standard SQL:\nWITH cte AS \n (\n SELECT -- first of month\n Trunc(BEGIN(pd), 'mon') AS mon \n ,C2\n FROM tab\n -- create a period on-the-fly, adjust the end date as periods exclude the end \n EXPAND ON PERIOD(EFF_START_DATE, Next(EFF_END_DATE)) AS pd\n -- return one row per month\n BY ANCHOR PERIOD MONTH_BEGIN\n -- restrict output to a specifc range\n FOR PERIOD (date '2018-01-01', date '2018-03-31')\n )\nSELECT mon, C2, Count(*)\nFROM cte\nGROUP BY 1,2\nORDER BY 1,2\n;\n\n" ]
[ 0 ]
[]
[]
[ "sql", "teradata", "teradatasql" ]
stackoverflow_0074673534_sql_teradata_teradatasql.txt
Q: AndroidTV IPTV change channel by number input Need to change channel by numpad of remote controller but don't know correct method. Numbers can be pressed multiple times and need to wait for all of them and get 1 String like "123". Now I override onKey up like below override fun onKeyUp(keyCode: Int, event: KeyEvent): Boolean { return when(keyCode){ KeyEvent.KEYCODE_1->{ //I don't know how to wait here next key up and get correct full channel number true } KeyEvent.KEYCODE_2->{ ... true } //EPG KeyEvent.KEYCODE_3->{ ... true } ... ... else -> super.onKeyUp(keyCode, event) } } A: To perform searching you need first to collect all digits of the number in range of some time, you can use StringBuilder to append one digit inside onKeyUp or OnKeyDown depend on your requirements You need to delay performing search until the user write the full number, you can use CountDownTimer and reset the time every time the use write new digit (You can also create a progress bar represent the timer and update it) or you can use simple Timer When the time is finish you should perform the search operation and clear the last number in StringBuilder and Update UI val channelNumber = StringBuilder() val numberSearchTimer : CountDownTimer? override fun onKeyDown(keyCode: Int, event: KeyEvent): Boolean { return when(keyCode){ KeyEvent.KEYCODE_1 -> { cannelNumber.append("1") perfomNumberSearch() true } KeyEvent.KEYCODE_2 -> { cannelNumber.append("2") perfomNumberSearch() true } ... 
else -> super.onKeyUp(keyCode, event) } } private fun perfomNumberSearch() { // Update UI With the new number binding.searchChannelNumber.text = channelNumber.toString() // Cancel the current time of it exists if (numberSearchTimer != null) numberSearchTimer.cancel() numberSearchTimer = object : CountDownTimer(1000, 100) { override fun onTick(millisUntilFinished: Long) { // Update UI With a progress until it perform search // or replace CountDownTimer with Timer } override fun onFinish() { changeChannelByNumber(channelNumber.toString()) // Clear the last search number after performing it channelNumber.clear() binding.searchChannelNumber.text = channelNumber.toString() } }.start() }
AndroidTV IPTV change channel by number input
Need to change channel by numpad of remote controller but don't know correct method. Numbers can be pressed multiple times and need to wait for all of them and get 1 String like "123". Now I override onKey up like below override fun onKeyUp(keyCode: Int, event: KeyEvent): Boolean { return when(keyCode){ KeyEvent.KEYCODE_1->{ //I don't know how to wait here next key up and get correct full channel number true } KeyEvent.KEYCODE_2->{ ... true } //EPG KeyEvent.KEYCODE_3->{ ... true } ... ... else -> super.onKeyUp(keyCode, event) } }
[ "\nTo perform searching you need first to collect all digits of the number in range of some time, you can use StringBuilder to append one digit inside onKeyUp or OnKeyDown depend on your requirements\n\nYou need to delay performing search until the user write the full number, you can use CountDownTimer and reset the time every time the use write new digit (You can also create a progress bar represent the timer and update it) or you can use simple Timer\n\nWhen the time is finish you should perform the search operation and clear the last number in StringBuilder and Update UI\n\n\nval channelNumber = StringBuilder()\nval numberSearchTimer : CountDownTimer?\n\noverride fun onKeyDown(keyCode: Int, event: KeyEvent): Boolean {\n return when(keyCode){\n KeyEvent.KEYCODE_1 -> {\n cannelNumber.append(\"1\")\n perfomNumberSearch()\n true\n }\n KeyEvent.KEYCODE_2 -> {\n cannelNumber.append(\"2\")\n perfomNumberSearch()\n true\n }\n ...\n \n else -> super.onKeyUp(keyCode, event)\n }\n}\n\nprivate fun perfomNumberSearch() {\n // Update UI With the new number\n binding.searchChannelNumber.text = channelNumber.toString()\n \n // Cancel the current time of it exists\n if (numberSearchTimer != null) numberSearchTimer.cancel()\n \n numberSearchTimer = object : CountDownTimer(1000, 100) {\n override fun onTick(millisUntilFinished: Long) {\n // Update UI With a progress until it perform search\n // or replace CountDownTimer with Timer\n }\n \n override fun onFinish() {\n changeChannelByNumber(channelNumber.toString())\n \n \n // Clear the last search number after performing it\n channelNumber.clear()\n binding.searchChannelNumber.text = channelNumber.toString()\n }\n }.start()\n}\n\n" ]
[ 1 ]
[]
[]
[ "android", "android_tv", "iptv", "kotlin", "numpad" ]
stackoverflow_0074673882_android_android_tv_iptv_kotlin_numpad.txt
Q: is there any way to auto dismiss Help! Can I connect this alert with table then if there is a record or one row inserted in table, the alert display else if there is no record in the table it means the alert already disappeared. Kindly help me <div class="alert alert-warning alert-dismissible"> <button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button> <h6> <i class="icon fas fa-exclamation-triangle"></i> <i> Dear user! there is a problem of approving request</i> </h6> </div> A: your implementation is not efficient but setTimeout(),remove() are the options to go in your case. either this : const alert = document.querySelector('.alert'); // Dismiss the alert after 5 seconds setTimeout(() => { alert.remove(); }, 5000); or this : const alert = document.querySelector('.alert'); // Dismiss the alert after 5 seconds setTimeout(() => { alert.style.display = 'none'; }, 5000);
is there any way to auto dismiss
Help! Can I connect this alert with table then if there is a record or one row inserted in table, the alert display else if there is no record in the table it means the alert already disappeared. Kindly help me <div class="alert alert-warning alert-dismissible"> <button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button> <h6> <i class="icon fas fa-exclamation-triangle"></i> <i> Dear user! there is a problem of approving request</i> </h6> </div>
[ "your implementation is not efficient but setTimeout(),remove() are the options to go in your case.\neither this :\nconst alert = document.querySelector('.alert');\n\n// Dismiss the alert after 5 seconds\nsetTimeout(() => {\n alert.remove();\n}, 5000);\n\nor this :\nconst alert = document.querySelector('.alert');\n\n// Dismiss the alert after 5 seconds\nsetTimeout(() => {\n alert.style.display = 'none';\n}, 5000);\n\n" ]
[ 0 ]
[]
[]
[ "html", "javascript" ]
stackoverflow_0074674646_html_javascript.txt
Q: Not supported record types for CSV import in Netsuite Can I get a name of Unsupported record types that cannot be imported using CSV in Netsuite. Item fulfillment is the 1 know... Please advise. Can I get a name of Unsupported record types that cannot be imported using CSV in Netsuite. Item fulfillment is the 1 know... Please advise. A: You can get list of supported records from suiteanswer id 10008 https://suiteanswers.custhelp.com/app/answers/detail/a_id/10008 So anything not listed on this page means not supported :D From my experience, these are records that can't be imported using csv. Transactions Item Fulfilment Item Receipt Customer Deposit Bank Transfer
Not supported record types for CSV import in Netsuite
Can I get a name of Unsupported record types that cannot be imported using CSV in Netsuite. Item fulfillment is the 1 know... Please advise. Can I get a name of Unsupported record types that cannot be imported using CSV in Netsuite. Item fulfillment is the 1 know... Please advise.
[ "You can get list of supported records from suiteanswer id 10008\nhttps://suiteanswers.custhelp.com/app/answers/detail/a_id/10008\nSo anything not listed on this page means not supported :D\nFrom my experience, these are records that can't be imported using csv.\nTransactions\n\nItem Fulfilment\nItem Receipt\nCustomer Deposit\nBank Transfer\n\n" ]
[ 0 ]
[]
[]
[ "netsuite" ]
stackoverflow_0074560604_netsuite.txt
Q: LinqKit Core PredicateBuilder not functioning correctly I have this query: var query = LinqKit.PredicateBuilder.New<Resume>(); if (selectedWorkFieldID != 0) { query = query.And(js => js.WorkFieldID == selectedWorkFieldID); if (!(selectedJobIDs.Contains(0) && selectedJobIDs.Count() == 1)) { foreach (int jobID in selectedJobIDs) query = query.Or(js => js.JobID == jobID); } } var finalQuery = context.Resumes.AsNoTracking().Include(r => r.ResumeSkills) .ThenInclude(rs => rs.Skill).Include(r => r.JobSeeker).ThenInclude(r => r.Profile) .AsExpandable().Where(query); count = finalQuery.Count(); resumes = finalQuery.Skip(args.Skip.Value).Take(args.Top.Value).ToList<Resume>(); This query returns All resumes not filtered ones. When I debug, the debugger curser enters the foreach block that filters with or, and there is one jobID in selectedJobIDs but the query returns all resumes. it seems the predicate builder not working at all. How to solve this? A: I changed code to this: if (selectedWorkFieldID != 0) { query = query.And(js => js.WorkFieldID == selectedWorkFieldID); if (!(selectedJobIDs.Contains(0) && selectedJobIDs.Count() == 1)) { var query2 = LinqKit.PredicateBuilder.New<Resume>(); foreach (int jobID in selectedJobIDs) query2 = query2.Or(js => js.JobID == jobID); query.And(query2); } } and it is corrected.
LinqKit Core PredicateBuilder not functioning correctly
I have this query: var query = LinqKit.PredicateBuilder.New<Resume>(); if (selectedWorkFieldID != 0) { query = query.And(js => js.WorkFieldID == selectedWorkFieldID); if (!(selectedJobIDs.Contains(0) && selectedJobIDs.Count() == 1)) { foreach (int jobID in selectedJobIDs) query = query.Or(js => js.JobID == jobID); } } var finalQuery = context.Resumes.AsNoTracking().Include(r => r.ResumeSkills) .ThenInclude(rs => rs.Skill).Include(r => r.JobSeeker).ThenInclude(r => r.Profile) .AsExpandable().Where(query); count = finalQuery.Count(); resumes = finalQuery.Skip(args.Skip.Value).Take(args.Top.Value).ToList<Resume>(); This query returns All resumes not filtered ones. When I debug, the debugger curser enters the foreach block that filters with or, and there is one jobID in selectedJobIDs but the query returns all resumes. it seems the predicate builder not working at all. How to solve this?
[ "I changed code to this:\nif (selectedWorkFieldID != 0)\n {\n query = query.And(js => js.WorkFieldID == selectedWorkFieldID);\n if (!(selectedJobIDs.Contains(0) && selectedJobIDs.Count() == 1))\n {\n var query2 = LinqKit.PredicateBuilder.New<Resume>();\n foreach (int jobID in selectedJobIDs)\n query2 = query2.Or(js => js.JobID == jobID);\n query.And(query2);\n }\n }\n\nand it is corrected.\n" ]
[ 0 ]
[]
[]
[ "entity_framework_core", "linq", "linqkit" ]
stackoverflow_0074674230_entity_framework_core_linq_linqkit.txt
Q: C# using System.Windows.Forms does not exist I tried to add: using System.Windows.Forms But its shows me an error. I know I must assembly that to my VS2012 but I srsly don't know how. I was searching everywhere and tried everything I've found but none of this helps me. @edit 1 hour of searching in the web and... 2 minutes after write this post i found... Project >> add Reference... Solved :] A: If you're using Visual Studio, right click the References folder in your project, select Add Reference... and then on the .NET tab, choose System.Windows.Forms and click OK. A: Right click on project references in Solution Explorer, choose Add Reference..., there you have to find System.Windows.Forms assembly (under Assemblies=>Framework) and add it to the project. A: For people with .NET 6.0: Add this to your .csproj file (click on the file named after your project in Solution Explorer) before the </Project>: </PropertyGroup> <PropertyGroup> <TargetFramework>netcoreapp6.0-windows</TargetFramework> <UseWPF>true</UseWPF> <UseWindowsForms>true</UseWindowsForms> </PropertyGroup>
C# using System.Windows.Forms does not exist
I tried to add: using System.Windows.Forms But its shows me an error. I know I must assembly that to my VS2012 but I srsly don't know how. I was searching everywhere and tried everything I've found but none of this helps me. @edit 1 hour of searching in the web and... 2 minutes after write this post i found... Project >> add Reference... Solved :]
[ "If you're using Visual Studio, right click the References folder in your project, select Add Reference... and then on the .NET tab, choose System.Windows.Forms and click OK.\n", "Right click on project references in Solution Explorer, choose Add Reference..., there you have to find System.Windows.Forms assembly (under Assemblies=>Framework) and add it to the project.\n", "For people with .NET 6.0:\nAdd this to your .csproj file (click on the file named after your project in Solution Explorer) before the </Project>:\n</PropertyGroup>\n <PropertyGroup>\n <TargetFramework>netcoreapp6.0-windows</TargetFramework>\n <UseWPF>true</UseWPF>\n <UseWindowsForms>true</UseWindowsForms>\n</PropertyGroup>\n\n" ]
[ 11, 5, 0 ]
[]
[]
[ "c#" ]
stackoverflow_0015712649_c#.txt
Q: How can I replace values ​in an array? I have an array of 1 and 2, I need to change the values ​​2 to 0 but I know how to do it. Someone who can help me with the code in R. Thanck A: You can try %% arr <- arr %% 2 or replace replace(arr, arr == 2, 0), or just - 2 - arr or a faster version with bitwAnd arr <- bitwAnd(arr, 1) Benchmark set.seed(1) arr <- sample(1:2, 1e6, replace = TRUE) microbenchmark( arr %% 2, replace(arr, arr == 2, 0), 2 - arr, bitwAnd(arr, 1), check = "equivalent" ) gives Unit: milliseconds expr min lq mean median uq arr%%2 10.874101 11.269351 12.689352 11.537851 13.611351 replace(arr, arr == 2, 0) 9.174002 9.542651 11.347834 10.070752 13.385150 2 - arr 1.477701 1.548151 3.076632 1.612951 1.981701 bitwAnd(arr, 1) 1.322300 1.368951 1.678158 1.413750 1.537602 max neval 24.764601 100 16.579201 100 61.927400 100 6.053701 100 A: Since the question is about arrays I want to expand a little to a more general case where you want to replace only certain dimension or one dimension according to another dimension. set.seed(1) arr <- array(data=sample(1:2, 24, replace = TRUE), dim=c(2,3,4)) # replace all 2 values arr[arr==2] <- 0 # replace only first index of dimension 1 arr[1,,][arr[1,,]==2] <- 0 # replace second index according to first index of dimension 1 arr[2,,][arr[1,,]==2] <- 0
How can I replace values ​in an array?
I have an array of 1 and 2, I need to change the values ​​2 to 0 but I know how to do it. Someone who can help me with the code in R. Thanck
[ "You can try %%\narr <- arr %% 2\n\nor replace\nreplace(arr, arr == 2, 0),\n\nor just -\n2 - arr\n\nor a faster version with bitwAnd\narr <- bitwAnd(arr, 1)\n\n\nBenchmark\nset.seed(1)\narr <- sample(1:2, 1e6, replace = TRUE)\nmicrobenchmark(\n arr %% 2,\n replace(arr, arr == 2, 0),\n 2 - arr,\n bitwAnd(arr, 1),\n check = \"equivalent\"\n)\n\n\ngives\nUnit: milliseconds\n expr min lq mean median uq\n arr%%2 10.874101 11.269351 12.689352 11.537851 13.611351\n replace(arr, arr == 2, 0) 9.174002 9.542651 11.347834 10.070752 13.385150\n 2 - arr 1.477701 1.548151 3.076632 1.612951 1.981701\n bitwAnd(arr, 1) 1.322300 1.368951 1.678158 1.413750 1.537602\n max neval\n 24.764601 100\n 16.579201 100\n 61.927400 100\n 6.053701 100\n\n", "Since the question is about arrays I want to expand a little to a more general case where you want to replace only certain dimension or one dimension according to another dimension.\nset.seed(1)\narr <- array(data=sample(1:2, 24, replace = TRUE), dim=c(2,3,4))\n\n# replace all 2 values\narr[arr==2] <- 0\n\n# replace only first index of dimension 1\narr[1,,][arr[1,,]==2] <- 0\n\n# replace second index according to first index of dimension 1\narr[2,,][arr[1,,]==2] <- 0\n\n" ]
[ 3, 0 ]
[]
[]
[ "arrays", "for_loop", "if_statement", "loops", "r" ]
stackoverflow_0072042864_arrays_for_loop_if_statement_loops_r.txt
Q: Elastic Search - Java Permissions Issue I'm trying to allow remote connections to elasticsearch(7.1.7). Whenever I change network.host in /etc/elastic/elasticsearch.yml to anything but the default value i get an error. error [2022-12-04T03:09:13,741][INFO ][o.e.x.m.p.NativeController] [ubuntu] Native controller process has stopped - no new native processes can be started [2022-12-04T03:09:13,741][ERROR][o.e.b.ElasticsearchUncaughtExceptionHandler] [ubuntu] uncaught exception in thread [process reaper (pid 2635649)] java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "modifyThread") at java.security.AccessControlContext.checkPermission(AccessControlContext.java:485) ~[?:?] at java.security.AccessController.checkPermission(AccessController.java:1068) ~[?:?] at java.lang.SecurityManager.checkPermission(SecurityManager.java:411) ~[?:?] at org.elasticsearch.secure_sm.SecureSM.checkThreadAccess(SecureSM.java:160) ~[?:7.17.7] at org.elasticsearch.secure_sm.SecureSM.checkAccess(SecureSM.java:120) ~[?:7.17.7] at java.lang.Thread.checkAccess(Thread.java:2360) ~[?:?] at java.lang.Thread.setDaemon(Thread.java:2308) ~[?:?] at java.lang.ProcessHandleImpl.lambda$static$0(ProcessHandleImpl.java:103) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.<init>(ThreadPoolExecutor.java:637) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:928) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:1021) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1158) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:642) ~[?:?] at java.lang.Thread.run(Thread.java:1589) [?:?] at jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:186) ~[?:?] 
Java Version openjdk 11.0.17 2022-10-18 OpenJDK Runtime Environment (build 11.0.17+8-post-Ubuntu-1ubuntu222.04) OpenJDK 64-Bit Server VM (build 11.0.17+8-post-Ubuntu-1ubuntu222.04, mixed mode, sharing) /etc/elasticsearch/elasticsearch.yml # ---------------------------------- Cluster ----------------------------------- # # Use a descriptive name for your cluster: # cluster.name: search # # ------------------------------------ Node ------------------------------------ # # Use a descriptive name for the node: # #node.name: node-1 # # Add custom attributes to the node: # #node.attr.rack: r1 # # ----------------------------------- Paths ------------------------------------ # # Path to directory where to store the data (separate multiple locations by comma): # path.data: /var/lib/elasticsearch # # Path to log files: # path.logs: /var/log/elasticsearch # # ----------------------------------- Memory ----------------------------------- # # Lock the memory on startup: # #bootstrap.memory_lock: true # # Make sure that the heap size is set to about half the memory available # on the system and that the owner of the process is allowed to use this # limit. # # Elasticsearch performs poorly when the system is swapping the memory. # # ---------------------------------- Network ----------------------------------- # # By default Elasticsearch is only accessible on localhost. Set a different # address here to expose this node on the network: # # network.host: 192.168.1.100 network.host: 0.0.0.0 # # By default Elasticsearch listens for HTTP traffic on the first free port it # finds starting at 9200. Set a specific HTTP port here: # #http.port: 9200 # # For more information, consult the network module documentation. 
# # --------------------------------- Discovery ---------------------------------- # # Pass an initial list of hosts to perform discovery when this node is started: # The default list of hosts is ["127.0.0.1", "[::1]"] # #discovery.seed_hosts: ["host1", "host2"] # # Bootstrap the cluster using an initial set of master-eligible nodes: # #cluster.initial_master_nodes: ["node-1", "node-2"] # # For more information, consult the discovery and cluster formation module documentation. # # ---------------------------------- Various ----------------------------------- # # Require explicit names when deleting indices: # action.destructive_requires_name: false # # ---------------------------------- Security ---------------------------------- # # *** WARNING *** # # Elasticsearch security features are not enabled by default. # These features are free, but require configuration changes to enable them. # This means that users don’t have to provide credentials and can get full access # to the cluster. Network connections are also not encrypted. # # To protect your data, we strongly encourage you to enable the Elasticsearch security features. # Refer to the following documentation for instructions. # # https://www.elastic.co/guide/en/elasticsearch/reference/7.16/configuring-stack-security.html xpack.security.enabled: true xpack.security.http.ssl.enabled: false xpack.security.http.ssl.key: /etc/elasticsearch/config/es-key.pem xpack.security.http.ssl.certificate: /etc/elasticsearch/config/es-cert.pem xpack.security.authc.api_key.enabled: true xpack: security: authc: realms: native: native1: order: 0 process.permissions.modifyThread: true A: To fix this error grant the user that Elasticsearch is running under the necessary permissions to modify threads. This can typically be done by adding the user to the java.lang.RuntimePermission group, or by modifying the java.policy file to grant the user the necessary permissions. 
For example, if the user that Elasticsearch is running under is named "elastic", add them to the java.lang.RuntimePermission group by running the following command: usermod -a -G java.lang.RuntimePermission elastic Another option is modify the java.policy file to grant users the necessary permissions. Find java.policy file in $JAVA_HOME/lib/security directory and the following lines to it: grant { permission java.lang.RuntimePermission "modifyThread"; }; WARN: grant block in the java.policy file applies to all users Then restart Elasticsearch in order for the changes to take effect (service elasticsearch restart). After these action Elasticsearch should be able to modify threads and connect to the remote host without encountering java.security.AccessControlException error.
Elastic Search - Java Permissions Issue
I'm trying to allow remote connections to elasticsearch(7.1.7). Whenever I change network.host in /etc/elastic/elasticsearch.yml to anything but the default value i get an error. error [2022-12-04T03:09:13,741][INFO ][o.e.x.m.p.NativeController] [ubuntu] Native controller process has stopped - no new native processes can be started [2022-12-04T03:09:13,741][ERROR][o.e.b.ElasticsearchUncaughtExceptionHandler] [ubuntu] uncaught exception in thread [process reaper (pid 2635649)] java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "modifyThread") at java.security.AccessControlContext.checkPermission(AccessControlContext.java:485) ~[?:?] at java.security.AccessController.checkPermission(AccessController.java:1068) ~[?:?] at java.lang.SecurityManager.checkPermission(SecurityManager.java:411) ~[?:?] at org.elasticsearch.secure_sm.SecureSM.checkThreadAccess(SecureSM.java:160) ~[?:7.17.7] at org.elasticsearch.secure_sm.SecureSM.checkAccess(SecureSM.java:120) ~[?:7.17.7] at java.lang.Thread.checkAccess(Thread.java:2360) ~[?:?] at java.lang.Thread.setDaemon(Thread.java:2308) ~[?:?] at java.lang.ProcessHandleImpl.lambda$static$0(ProcessHandleImpl.java:103) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.<init>(ThreadPoolExecutor.java:637) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.addWorker(ThreadPoolExecutor.java:928) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:1021) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1158) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:642) ~[?:?] at java.lang.Thread.run(Thread.java:1589) [?:?] at jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:186) ~[?:?] 
Java Version openjdk 11.0.17 2022-10-18 OpenJDK Runtime Environment (build 11.0.17+8-post-Ubuntu-1ubuntu222.04) OpenJDK 64-Bit Server VM (build 11.0.17+8-post-Ubuntu-1ubuntu222.04, mixed mode, sharing) /etc/elasticsearch/elasticsearch.yml # ---------------------------------- Cluster ----------------------------------- # # Use a descriptive name for your cluster: # cluster.name: search # # ------------------------------------ Node ------------------------------------ # # Use a descriptive name for the node: # #node.name: node-1 # # Add custom attributes to the node: # #node.attr.rack: r1 # # ----------------------------------- Paths ------------------------------------ # # Path to directory where to store the data (separate multiple locations by comma): # path.data: /var/lib/elasticsearch # # Path to log files: # path.logs: /var/log/elasticsearch # # ----------------------------------- Memory ----------------------------------- # # Lock the memory on startup: # #bootstrap.memory_lock: true # # Make sure that the heap size is set to about half the memory available # on the system and that the owner of the process is allowed to use this # limit. # # Elasticsearch performs poorly when the system is swapping the memory. # # ---------------------------------- Network ----------------------------------- # # By default Elasticsearch is only accessible on localhost. Set a different # address here to expose this node on the network: # # network.host: 192.168.1.100 network.host: 0.0.0.0 # # By default Elasticsearch listens for HTTP traffic on the first free port it # finds starting at 9200. Set a specific HTTP port here: # #http.port: 9200 # # For more information, consult the network module documentation. 
# # --------------------------------- Discovery ---------------------------------- # # Pass an initial list of hosts to perform discovery when this node is started: # The default list of hosts is ["127.0.0.1", "[::1]"] # #discovery.seed_hosts: ["host1", "host2"] # # Bootstrap the cluster using an initial set of master-eligible nodes: # #cluster.initial_master_nodes: ["node-1", "node-2"] # # For more information, consult the discovery and cluster formation module documentation. # # ---------------------------------- Various ----------------------------------- # # Require explicit names when deleting indices: # action.destructive_requires_name: false # # ---------------------------------- Security ---------------------------------- # # *** WARNING *** # # Elasticsearch security features are not enabled by default. # These features are free, but require configuration changes to enable them. # This means that users don’t have to provide credentials and can get full access # to the cluster. Network connections are also not encrypted. # # To protect your data, we strongly encourage you to enable the Elasticsearch security features. # Refer to the following documentation for instructions. # # https://www.elastic.co/guide/en/elasticsearch/reference/7.16/configuring-stack-security.html xpack.security.enabled: true xpack.security.http.ssl.enabled: false xpack.security.http.ssl.key: /etc/elasticsearch/config/es-key.pem xpack.security.http.ssl.certificate: /etc/elasticsearch/config/es-cert.pem xpack.security.authc.api_key.enabled: true xpack: security: authc: realms: native: native1: order: 0 process.permissions.modifyThread: true
[ "To fix this error grant the user that Elasticsearch is running under the necessary permissions to modify threads. This can typically be done by adding the user to the java.lang.RuntimePermission group, or by modifying the java.policy file to grant the user the necessary permissions.\nFor example, if the user that Elasticsearch is running under is named \"elastic\", add them to the java.lang.RuntimePermission group by running the following command:\nusermod -a -G java.lang.RuntimePermission elastic\n\nAnother option is modify the java.policy file to grant users the necessary permissions. Find java.policy file in $JAVA_HOME/lib/security directory and the following lines to it:\ngrant {\n permission java.lang.RuntimePermission \"modifyThread\";\n};\n\nWARN: grant block in the java.policy file applies to all users\nThen restart Elasticsearch in order for the changes to take effect (service elasticsearch restart).\nAfter these action Elasticsearch should be able to modify threads and connect to the remote host without encountering java.security.AccessControlException error.\n" ]
[ 0 ]
[]
[]
[ "elasticsearch", "java", "linux", "ubuntu" ]
stackoverflow_0074672626_elasticsearch_java_linux_ubuntu.txt
Q: Spark hash of full dataframe Is it possible to find the hash (preferably hash 256) value of the full PySpark dataframe. I dont want to find hash of individual rows or columns. I know function exists in pySpark for column level hash calculation from pyspark.sql.functions import sha2 The requirement is to partiton a big dataframe based on years and for each year(small dataframes) find the hash value and persist the result in a table. Input (Product, Qauntity, Store, SoldDate) Read the data in a dataframe, partition by SoldDate, calculate the hash for each partition and write to a file/table. Output: (Date, hash) The reason I am doing this is I have to compare the run this process daily and then check whether the hash changed for any previous dates. There is file level md5 possible but dont want to generate files but calcualte hash on the fly for the partitions/small dataframes based on dates A: To compute the MD5 hash value of a PySpark dataframe, you can use the hashlib.md5() function from the hashlib module. This function returns an md5 object that you can use to compute the hash value of a dataframe. 
Here is an example of how you can use this approach to compute the MD5 hash value of a PySpark dataframe: import hashlib from pyspark.sql import Row # Create a sample dataframe data = [ Row(Product="Product1", Quantity=10, Store="Store1", SoldDate="2022-01-01"), Row(Product="Product2", Quantity=20, Store="Store2", SoldDate="2022-01-01"), Row(Product="Product3", Quantity=30, Store="Store3", SoldDate="2022-01-01"), Row(Product="Product1", Quantity=40, Store="Store1", SoldDate="2022-01-02"), Row(Product="Product2", Quantity=50, Store="Store2", SoldDate="2022-01-02"), Row(Product="Product3", Quantity=60, Store="Store3", SoldDate="2022-01-02"), ] df = spark.createDataFrame(data) # Compute the MD5 hash value of the dataframe df_hash = df.rdd.map(lambda x: hashlib.md5(str(x).encode()).hexdigest()).reduce(lambda x, y: x + y) # Print the hash value print(df_hash) This will compute the MD5 hash value of the dataframe by applying the hashlib.md5() function to each row of the dataframe, then combining the hash values of all the rows into a single hash value for the entire dataframe.
Spark hash of full dataframe
Is it possible to find the hash (preferably hash 256) value of the full PySpark dataframe. I dont want to find hash of individual rows or columns. I know function exists in pySpark for column level hash calculation from pyspark.sql.functions import sha2 The requirement is to partiton a big dataframe based on years and for each year(small dataframes) find the hash value and persist the result in a table. Input (Product, Qauntity, Store, SoldDate) Read the data in a dataframe, partition by SoldDate, calculate the hash for each partition and write to a file/table. Output: (Date, hash) The reason I am doing this is I have to compare the run this process daily and then check whether the hash changed for any previous dates. There is file level md5 possible but dont want to generate files but calcualte hash on the fly for the partitions/small dataframes based on dates
[ "To compute the MD5 hash value of a PySpark dataframe, you can use the hashlib.md5() function from the hashlib module. This function returns an md5 object that you can use to compute the hash value of a dataframe.\nHere is an example of how you can use this approach to compute the MD5 hash value of a PySpark dataframe:\nimport hashlib\nfrom pyspark.sql import Row\n\n# Create a sample dataframe\ndata = [\n Row(Product=\"Product1\", Quantity=10, Store=\"Store1\", SoldDate=\"2022-01-01\"),\n Row(Product=\"Product2\", Quantity=20, Store=\"Store2\", SoldDate=\"2022-01-01\"),\n Row(Product=\"Product3\", Quantity=30, Store=\"Store3\", SoldDate=\"2022-01-01\"),\n Row(Product=\"Product1\", Quantity=40, Store=\"Store1\", SoldDate=\"2022-01-02\"),\n Row(Product=\"Product2\", Quantity=50, Store=\"Store2\", SoldDate=\"2022-01-02\"),\n Row(Product=\"Product3\", Quantity=60, Store=\"Store3\", SoldDate=\"2022-01-02\"),\n]\ndf = spark.createDataFrame(data)\n\n# Compute the MD5 hash value of the dataframe\ndf_hash = df.rdd.map(lambda x: hashlib.md5(str(x).encode()).hexdigest()).reduce(lambda x, y: x + y)\n\n# Print the hash value\nprint(df_hash)\n\nThis will compute the MD5 hash value of the dataframe by applying the hashlib.md5() function to each row of the dataframe, then combining the hash values of all the rows into a single hash value for the entire dataframe.\n" ]
[ 0 ]
[]
[]
[ "hash", "pyspark" ]
stackoverflow_0074672598_hash_pyspark.txt
Q: Getting a 'flat' array out of Doctrine 2 queryBuilder with addSelect I'm wondering if there's a clean way to get around this little conundrum... Been getting around it, but finally have time to look into "the right way"! Doctrine Query Builder Snippet public function getSprockets(array $id_list, $include_stats = false ) { $qb = $this->getEntityManager()->createQueryBuilder() ->select('p') ->from( 'Entity\Foo', 'p' ) ->where('p.id IN ( :pids )') ->setParameter('pids', $id_list); if( $include_stats ) $qb->addSelect( '(SELECT SUM( a.value ) FROM Entity\Stats a WHERE a.foo_id = p.id AND a.type="bar" ) AS bar_count' ); $res = $qb->getQuery()->getArrayResult(); return $res; } As is, this snippet behaves very differently whether addSelect is invoked or no. If it is not there, I get a nice flat array as is expected. If the addSelect is however used ($include_stats is true), a row in $res is quite different, containing: the entity's parts in a $row[0] and the addSelect result at the base of the $row, e.g., $row['bar_count'] I realize that listing the columns in ->select('p.id, p.that, p.this') ... gets around the problem, but I don't want to have to maintain these many queries each time the schemata is changed. Is there a convenient-or-built-in way to get a flat array out of getArrayResult? Thanks! A: Yes, you can use the getScalarResult method instead of getArrayResult to get a flat array from a Doctrine query builder. The getScalarResult method returns an array of scalar values (i.e. strings, integers, etc.) rather than entities, so you won't have to deal with nested arrays or objects. 
Here's how you could modify your code to use getScalarResult: public function getSprockets(array $id_list, $include_stats = false ) { $qb = $this->getEntityManager()->createQueryBuilder() ->select('p') ->from( 'Entity\Foo', 'p' ) ->where('p.id IN ( :pids )') ->setParameter('pids', $id_list); if( $include_stats ) $qb->addSelect( '(SELECT SUM( a.value ) FROM Entity\Stats a WHERE a.foo_id = p.id AND a.type="bar" ) AS bar_count' ); // Use getScalarResult instead of getArrayResult $res = $qb->getQuery()->getScalarResult(); return $res; } This will return a flat array where each element is an array of scalar values, rather than an array of entities. For example, if you are selecting the id, name, and type fields from the Entity\Foo entity, you would get an array that looks something like this: [ ['id' => 1, 'name' => 'Foo 1', 'type' => 'Type A'], ['id' => 2, 'name' => 'Foo 2', 'type' => 'Type B'], ... ] If you are also selecting the bar_count value from the Entity\Stats entity, then each element in the array would look like this: [ ['id' => 1, 'name' => 'Foo 1', 'type' => 'Type A', 'bar_count' => 123], ['id' => 2, 'name' => 'Foo 2', 'type' => 'Type B', 'bar_count' => 456], ] ...
Getting a 'flat' array out of Doctrine 2 queryBuilder with addSelect
I'm wondering if there's a clean way to get around this little conundrum... Been getting around it, but finally have time to look into "the right way"! Doctrine Query Builder Snippet public function getSprockets(array $id_list, $include_stats = false ) { $qb = $this->getEntityManager()->createQueryBuilder() ->select('p') ->from( 'Entity\Foo', 'p' ) ->where('p.id IN ( :pids )') ->setParameter('pids', $id_list); if( $include_stats ) $qb->addSelect( '(SELECT SUM( a.value ) FROM Entity\Stats a WHERE a.foo_id = p.id AND a.type="bar" ) AS bar_count' ); $res = $qb->getQuery()->getArrayResult(); return $res; } As is, this snippet behaves very differently whether addSelect is invoked or no. If it is not there, I get a nice flat array as is expected. If the addSelect is however used ($include_stats is true), a row in $res is quite different, containing: the entity's parts in a $row[0] and the addSelect result at the base of the $row, e.g., $row['bar_count'] I realize that listing the columns in ->select('p.id, p.that, p.this') ... gets around the problem, but I don't want to have to maintain these many queries each time the schemata is changed. Is there a convenient-or-built-in way to get a flat array out of getArrayResult? Thanks!
[ "Yes, you can use the getScalarResult method instead of getArrayResult to get a flat array from a Doctrine query builder. The getScalarResult method returns an array of scalar values (i.e. strings, integers, etc.) rather than entities, so you won't have to deal with nested arrays or objects.\nHere's how you could modify your code to use getScalarResult:\npublic function getSprockets(array $id_list, $include_stats = false )\n{\n $qb = $this->getEntityManager()->createQueryBuilder()\n ->select('p')\n ->from( 'Entity\\Foo', 'p' )\n ->where('p.id IN ( :pids )')\n ->setParameter('pids', $id_list);\n\n if( $include_stats )\n $qb->addSelect( '(SELECT SUM( a.value ) FROM Entity\\Stats a WHERE a.foo_id = p.id AND a.type=\"bar\" ) AS bar_count' );\n\n // Use getScalarResult instead of getArrayResult\n $res = $qb->getQuery()->getScalarResult();\n return $res;\n}\n\nThis will return a flat array where each element is an array of scalar values, rather than an array of entities. For example, if you are selecting the id, name, and type fields from the Entity\\Foo entity, you would get an array that looks something like this:\n[\n ['id' => 1, 'name' => 'Foo 1', 'type' => 'Type A'],\n ['id' => 2, 'name' => 'Foo 2', 'type' => 'Type B'],\n ...\n]\n\nIf you are also selecting the bar_count value from the Entity\\Stats entity, then each element in the array would look like this:\n[\n ['id' => 1, 'name' => 'Foo 1', 'type' => 'Type A', 'bar_count' => 123],\n ['id' => 2, 'name' => 'Foo 2', 'type' => 'Type B', 'bar_count' => 456],\n]\n...\n\n" ]
[ 0 ]
[]
[]
[ "doctrine_orm", "php" ]
stackoverflow_0034886020_doctrine_orm_php.txt
Q: Axios GET request with manual proxy setup (http-proxy-middleware) returns HTML instead of Json I am trying to get data with ReactJS in frontend only (no access to backend). Due to Cors problems I am using manual proxy setup with http-proxy-middleware - file setupProxy.js in src folder. I do not want to use proxy directly in package.json. In postman I get correct json data, but the axios response is just html with header and footer of requested page, to me it looks like the whole URL is maybe not added to base url. Expected result URL is this (works in postman): https://www.zbozi.cz/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g/?limitTopOffers=0&limitCheapOffers=10&filterFields=offersData This is my setupProxy.js: const proxy = require("http-proxy-middleware"); module.exports = function(app) { app.use( proxy("/product", { target: "https://www.zbozi.cz", secure: false, changeOrigin: true }) ); }; This is my axios call: axios .get(`/product/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g`, { headers: {Accept: 'application/json'}, params: { limitTopOffers: 0, limitCheapOffers: 10, filterFields: "offersData", } } ).then( (product) => { console.log(product) } ) This is what I get as result. It returns only header and footer of the page www.zbozi.cz, not data. <!DOCTYPE html><html lang="cs"><head><meta charSet="utf-8"/><title>Zboží.cz - Tisíce obchodů na jednom místě</title><meta name="description" content="Na Zboží.cz jsou produkty včetně popisů, recenzí, příslušenství a návodů. 
Ceny si navíc můžete srovnat od těch nejlevnějších."/><meta name="szn:status"/><meta name="viewport" content="width=device-width, initial-scale=1"/><meta name="next-head-count" content="5"/><meta http-equiv="X-UA-Compatible" content="IE=Edge"/><noscript><meta http-equiv="refresh" content="0;url=?_escaped_fragment_="/></noscript><meta name="referrer" content="origin"/><meta name="seznam-wmt" content="Hyz1YOQsFrCoCFcTDiRJgQEZNSjZpwbf"/><link rel="shortcut icon" href="/img/favicon/favicon.ico?version-9.2.0" type="image/x-icon"/><link rel="apple-touch-icon" sizes="57x57" href="/img/favicon/apple-touch-icon-57x57.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="60x60" href="/img/favicon/apple-touch-icon-60x60.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="72x72" href="/img/favicon/apple-touch-icon-72x72.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="76x76" href="/img/favicon/apple-touch-icon-76x76.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="114x114" href="/img/favicon/apple-touch-icon-114x114.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="120x120" href="/img/favicon/apple-touch-icon-120x120.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="144x144" href="/img/favicon/apple-touch-icon-144x144.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="152x152" href="/img/favicon/apple-touch-icon-152x152.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="180x180" href="/img/favicon/apple-touch-icon-180x180.png?version-9.2.0"/><meta name="msapplication-TileColor" content="#666666"/><meta name="msapplication-config" content="/img/favicon/browserconfig.xml?version-9.2.0"/><meta name="msapplication-TileImage" content="/img/favicon/mstile-144x144.png?version-9.2.0"/><link rel="search" type="application/opensearchdescription+xml" title="Zboží.cz" href="/zbozi-cz.xml"/><link rel="preload" href="/fonts/TriviaSeznam.woff" as="font" type="font/woff" crossorigin="anonymous"/> .......... 
I've tried some magic with adding and not adding "/" in both proxy and axios settings but got the same result. I've tried to add "headers: {Accept: 'application/json'}" to axios but the problem is somewhere else. A: To fix the issue with the Axios GET request returning HTML instead of JSON, you can try modifying your setupProxy.js file and your Axios call as follows: // setupProxy.js const proxy = require("http-proxy-middleware"); module.exports = function(app) { app.use( "/api", proxy({ target: "https://www.zbozi.cz", changeOrigin: true, pathRewrite: { "^/api": "" } }) ); }; // Axios call axios .get( "/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g", { headers: { Accept: "application/json" }, params: { limitTopOffers: 0, limitCheapOffers: 10, filterFields: "offersData" } } ) .then(product => { console.log(product); }); In the above code, I have added the /api prefix to the URL in the Axios call, and I have updated the setupProxy.js file to include the /api prefix in the proxy configuration. This ensures that the correct URL is being sent to the target server, and the response should be JSON rather than HTML. Additionally, I have added the pathRewrite option to the proxy configuration to remove the /api prefix from the proxied request. This ensures that the full URL is sent to the target server, which should fix the issue with the response being HTML instead of JSON.
Axios GET request with manual proxy setup (http-proxy-middleware) returns HTML instead of Json
I am trying to get data with ReactJS in frontend only (no access to backend). Due to Cors problems I am using manual proxy setup with http-proxy-middleware - file setupProxy.js in src folder. I do not want to use proxy directly in package.json. In postman I get correct json data, but the axios response is just html with header and footer of requested page, to me it looks like the whole URL is maybe not added to base url. Expected result URL is this (works in postman): https://www.zbozi.cz/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g/?limitTopOffers=0&limitCheapOffers=10&filterFields=offersData This is my setupProxy.js: const proxy = require("http-proxy-middleware"); module.exports = function(app) { app.use( proxy("/product", { target: "https://www.zbozi.cz", secure: false, changeOrigin: true }) ); }; This is my axios call: axios .get(`/product/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g`, { headers: {Accept: 'application/json'}, params: { limitTopOffers: 0, limitCheapOffers: 10, filterFields: "offersData", } } ).then( (product) => { console.log(product) } ) This is what I get as result. It returns only header and footer of the page www.zbozi.cz, not data. <!DOCTYPE html><html lang="cs"><head><meta charSet="utf-8"/><title>Zboží.cz - Tisíce obchodů na jednom místě</title><meta name="description" content="Na Zboží.cz jsou produkty včetně popisů, recenzí, příslušenství a návodů. 
Ceny si navíc můžete srovnat od těch nejlevnějších."/><meta name="szn:status"/><meta name="viewport" content="width=device-width, initial-scale=1"/><meta name="next-head-count" content="5"/><meta http-equiv="X-UA-Compatible" content="IE=Edge"/><noscript><meta http-equiv="refresh" content="0;url=?_escaped_fragment_="/></noscript><meta name="referrer" content="origin"/><meta name="seznam-wmt" content="Hyz1YOQsFrCoCFcTDiRJgQEZNSjZpwbf"/><link rel="shortcut icon" href="/img/favicon/favicon.ico?version-9.2.0" type="image/x-icon"/><link rel="apple-touch-icon" sizes="57x57" href="/img/favicon/apple-touch-icon-57x57.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="60x60" href="/img/favicon/apple-touch-icon-60x60.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="72x72" href="/img/favicon/apple-touch-icon-72x72.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="76x76" href="/img/favicon/apple-touch-icon-76x76.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="114x114" href="/img/favicon/apple-touch-icon-114x114.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="120x120" href="/img/favicon/apple-touch-icon-120x120.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="144x144" href="/img/favicon/apple-touch-icon-144x144.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="152x152" href="/img/favicon/apple-touch-icon-152x152.png?version-9.2.0"/><link rel="apple-touch-icon" sizes="180x180" href="/img/favicon/apple-touch-icon-180x180.png?version-9.2.0"/><meta name="msapplication-TileColor" content="#666666"/><meta name="msapplication-config" content="/img/favicon/browserconfig.xml?version-9.2.0"/><meta name="msapplication-TileImage" content="/img/favicon/mstile-144x144.png?version-9.2.0"/><link rel="search" type="application/opensearchdescription+xml" title="Zboží.cz" href="/zbozi-cz.xml"/><link rel="preload" href="/fonts/TriviaSeznam.woff" as="font" type="font/woff" crossorigin="anonymous"/> .......... 
I've tried some magic with adding and not adding "/" in both proxy and axios settings but got the same result. I've tried to add "headers: {Accept: 'application/json'}" to axios but the problem is somewhere else.
[ "To fix the issue with the Axios GET request returning HTML instead of JSON, you can try modifying your setupProxy.js file and your Axios call as follows:\n// setupProxy.js\nconst proxy = require(\"http-proxy-middleware\");\n\nmodule.exports = function(app) {\n app.use(\n \"/api\",\n proxy({\n target: \"https://www.zbozi.cz\",\n changeOrigin: true,\n pathRewrite: {\n \"^/api\": \"\"\n }\n })\n );\n};\n\n// Axios call\naxios\n .get(\n \"/api/v3/product/sonett-tuhe-mydlo-na-ruce-curd-soap-100g\",\n {\n headers: { Accept: \"application/json\" },\n params: {\n limitTopOffers: 0,\n limitCheapOffers: 10,\n filterFields: \"offersData\"\n }\n }\n )\n .then(product => {\n console.log(product);\n });\n\nIn the above code, I have added the /api prefix to the URL in the Axios call, and I have updated the setupProxy.js file to include the /api prefix in the proxy configuration. This ensures that the correct URL is being sent to the target server, and the response should be JSON rather than HTML.\nAdditionally, I have added the pathRewrite option to the proxy configuration to remove the /api prefix from the proxied request. This ensures that the full URL is sent to the target server, which should fix the issue with the response being HTML instead of JSON.\n" ]
[ 0 ]
[]
[]
[ "axios", "cors", "http_proxy_middleware", "proxy", "reactjs" ]
stackoverflow_0074674589_axios_cors_http_proxy_middleware_proxy_reactjs.txt
Q: error installing packages in R. (Windows) I just installed R and Rstudio and I'm new to programming in general. I just tried to install "ggally" library for data visualization but it's showing me the following repetetive error. Am I doing something wrong? ERROR: dependencies 'dplyr', 'ellipsis', 'glue', 'lifecycle', 'magrittr', 'purrr', 'rlang', 'tibble', 'tidyselect', 'vctrs' are not available for package 'tidyr' * removing 'C:/Users/smain/AppData/Local/R/win-library/4.2/tidyr' Warning in install.packages : installation of package ‘tidyr’ had non-zero exit status ERROR: dependencies 'ggplot2', 'dplyr', 'forcats', 'lifecycle', 'plyr', 'progress', 'reshape', 'rlang', 'scales', 'tidyr' are not available for package 'GGally' * removing 'C:/Users/smain/AppData/Local/R/win-library/4.2/GGally' Warning in install.packages : installation of package ‘GGally’ had non-zero exit status The downloaded source packages are in ‘C:\Users\smain\AppData\Local\Temp\RtmpI92I7C\downloaded_packages’ A: I think you should be conservative about it and do two steps first: 1- If you have an active Antivirus on your computer, temporarily disable it. 2- If you use RStudio, close the software, right-click on the icon and choose Run as administrator to open it. Then, you can re-install the package with the right repository address as below: install.packages('GGally', dependencies=TRUE, repos="https://CRAN.R-project.org/") library(GGally) I hope it helps.
error installing packages in R. (Windows)
I just installed R and Rstudio and I'm new to programming in general. I just tried to install "ggally" library for data visualization but it's showing me the following repetetive error. Am I doing something wrong? ERROR: dependencies 'dplyr', 'ellipsis', 'glue', 'lifecycle', 'magrittr', 'purrr', 'rlang', 'tibble', 'tidyselect', 'vctrs' are not available for package 'tidyr' * removing 'C:/Users/smain/AppData/Local/R/win-library/4.2/tidyr' Warning in install.packages : installation of package ‘tidyr’ had non-zero exit status ERROR: dependencies 'ggplot2', 'dplyr', 'forcats', 'lifecycle', 'plyr', 'progress', 'reshape', 'rlang', 'scales', 'tidyr' are not available for package 'GGally' * removing 'C:/Users/smain/AppData/Local/R/win-library/4.2/GGally' Warning in install.packages : installation of package ‘GGally’ had non-zero exit status The downloaded source packages are in ‘C:\Users\smain\AppData\Local\Temp\RtmpI92I7C\downloaded_packages’
[ "I think you should be conservative about it and do two steps first:\n1- If you have an active Antivirus on your computer, temporarily disable it.\n2- If you use RStudio, close the software, right-click on the icon and choose Run as administrator to open it.\nThen, you can re-install the package with the right repository address as below:\ninstall.packages('GGally', dependencies=TRUE, repos=\"https://CRAN.R-project.org/\")\nlibrary(GGally)\n\nI hope it helps.\n" ]
[ 0 ]
[]
[]
[ "r", "rlang" ]
stackoverflow_0074169335_r_rlang.txt
Q: How to Install "RarArchive" on MacbookPro? Anyone tried installing " RarArchive on their macbookpro? I tried installing but I got this error .1.13/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -DRARDLL -DSILENT -Wno-write-strings -Wall -fvisibility=hidden -I/private/tmp/pear/temp/rar/unrar -DZEND_COMPILE_DL_EXT=1 -c /private/tmp/pear/temp/rar/rar_stream.c -MMD -MF rar_stream.dep -MT rar_stream.lo -fno-common -DPIC -o .libs/rar_stream.o /private/tmp/pear/temp/rar/rar_stream.c:170:23: warning: format specifies type 'unsigned long' but the argument has type 'uint64' (aka 'unsigned long long') [-Wformat] self->file_size, self->cursor); ^~~~~~~~~~~~ /private/tmp/pear/temp/rar/rar_stream.c:787:58: error: too many arguments to function call, expected 1, have 2 zend_string *arc_str = zend_resolve_path(tmp_archive, tmp_arch_len); ~~~~~~~~~~~~~~~~~ ^~~~~~~~~~~~ 1 warning and 1 error generated. make: *** [rar_stream.lo] Error 1 rolling back 454 file operations ERROR: `make' failed Cause I'm trying to solve this warning on my PHP IDE warning. Thank you! I tried installing via pecl but it did not work. A: I fixed this issue by downgrading the PHP version from 8.1 to 8 and all the error is gone. Hopefully, this will help others as well.
How to Install "RarArchive" on MacbookPro?
Anyone tried installing " RarArchive on their macbookpro? I tried installing but I got this error .1.13/include/php/ext/date/lib -DHAVE_CONFIG_H -g -O2 -DRARDLL -DSILENT -Wno-write-strings -Wall -fvisibility=hidden -I/private/tmp/pear/temp/rar/unrar -DZEND_COMPILE_DL_EXT=1 -c /private/tmp/pear/temp/rar/rar_stream.c -MMD -MF rar_stream.dep -MT rar_stream.lo -fno-common -DPIC -o .libs/rar_stream.o /private/tmp/pear/temp/rar/rar_stream.c:170:23: warning: format specifies type 'unsigned long' but the argument has type 'uint64' (aka 'unsigned long long') [-Wformat] self->file_size, self->cursor); ^~~~~~~~~~~~ /private/tmp/pear/temp/rar/rar_stream.c:787:58: error: too many arguments to function call, expected 1, have 2 zend_string *arc_str = zend_resolve_path(tmp_archive, tmp_arch_len); ~~~~~~~~~~~~~~~~~ ^~~~~~~~~~~~ 1 warning and 1 error generated. make: *** [rar_stream.lo] Error 1 rolling back 454 file operations ERROR: `make' failed Cause I'm trying to solve this warning on my PHP IDE warning. Thank you! I tried installing via pecl but it did not work.
[ "I fixed this issue by downgrading the PHP version from 8.1 to 8 and all the error is gone.\nHopefully, this will help others as well.\n" ]
[ 0 ]
[]
[]
[ "php" ]
stackoverflow_0074672146_php.txt
Q: How do I fill a list with with tuples using a for-loop in python? I just finished implementing a working Python code for the Dijkstra-Pathfinding algorithm. I am applying this algorithm to a graph with edges, which I have written as a list of tuples: graph = Graph([ ("a", "b", 2),("a", "c", 5), ("a", "d", 2),("b", "c", 3), ("b", "e", 1),("c", "e", 1), ("c", "h", 1),("c", "f", 1), ("c", "d", 3),("d", "g", 2), ("e", "i", 7),("f", "h", 3), ("f", "g", 2),("h", "i", 1)]) I don't want to leave it like that and rather fill the graph using a for-loop, but this is exactly where I fail. I have tried writing graph.append(("i", "j", "4")) And several other variants using the append function but it just keeps giving me errors. I am aware that this isn't a for-loop, I am simply trying to add one edge for now. This is how I defined my add_edge function: Edge = namedtuple('Edge', 'start, end, cost') def add_edge(start, end, cost): return Edge(start, end, cost) A: In this line the parenthesis are serving as a container for multiple string arguments. graph.append("i", "j", "4") You need to add a layer of nested parenthesis to indicate that the argument is a single tuple. graph.append(("i", "j", "4")) A: To add an edge to a graph, you can use the add_edge method of the Graph class. This method takes three arguments: the source node, the destination node, and the weight of the edge. Here is an example of how you might use the add_edge method to add an edge to your graph: # Create a graph graph = Graph([ ("a", "b", 2),("a", "c", 5), ("a", "d", 2),("b", "c", 3), ("b", "e", 1),("c", "e", 1), ("c", "h", 1),("c", "f", 1), ("c", "d", 3),("d", "g", 2), ("e", "i", 7),("f", "h", 3), ("f", "g", 2),("h", "i", 1)]) # Add an edge to the graph graph.add_edge("i", "j", 4) If you want to add multiple edges to your graph using a for-loop, you can use the add_edge method inside the for-loop to add each edge. 
Here is an example of how you might do this: # Create a list of edges to add to the graph edges = [("i", "j", 4), ("j", "k", 5), ("k", "l", 6)] # Create a graph graph = Graph([ ("a", "b", 2),("a", "c", 5), ("a", "d", 2),("b", "c", 3), ("b", "e", 1),("c", "e", 1), ("c", "h", 1),("c", "f", 1), ("c", "d", 3),("d", "g", 2), ("e", "i", 7),("f", "h", 3), ("f", "g", 2),("h", "i", 1)]) # Iterate over the edges in the list for source, destination, weight in edges: # Add the edge to the graph graph.add_edge(source, destination, weight) This should add the edges in the edges list to your graph.
How do I fill a list with with tuples using a for-loop in python?
I just finished implementing a working Python code for the Dijkstra-Pathfinding algorithm. I am applying this algorithm to a graph with edges, which I have written as a list of tuples: graph = Graph([ ("a", "b", 2),("a", "c", 5), ("a", "d", 2),("b", "c", 3), ("b", "e", 1),("c", "e", 1), ("c", "h", 1),("c", "f", 1), ("c", "d", 3),("d", "g", 2), ("e", "i", 7),("f", "h", 3), ("f", "g", 2),("h", "i", 1)]) I don't want to leave it like that and rather fill the graph using a for-loop, but this is exactly where I fail. I have tried writing graph.append(("i", "j", "4")) And several other variants using the append function but it just keeps giving me errors. I am aware that this isn't a for-loop, I am simply trying to add one edge for now. This is how I defined my add_edge function: Edge = namedtuple('Edge', 'start, end, cost') def add_edge(start, end, cost): return Edge(start, end, cost)
[ "In this line the parenthesis are serving as a container for multiple string arguments.\ngraph.append(\"i\", \"j\", \"4\")\n\nYou need to add a layer of nested parenthesis to indicate that the argument is a single tuple.\ngraph.append((\"i\", \"j\", \"4\"))\n\n", "To add an edge to a graph, you can use the add_edge method of the Graph class. This method takes three arguments: the source node, the destination node, and the weight of the edge.\nHere is an example of how you might use the add_edge method to add an edge to your graph:\n# Create a graph\ngraph = Graph([\n (\"a\", \"b\", 2),(\"a\", \"c\", 5),\n (\"a\", \"d\", 2),(\"b\", \"c\", 3),\n (\"b\", \"e\", 1),(\"c\", \"e\", 1),\n (\"c\", \"h\", 1),(\"c\", \"f\", 1),\n (\"c\", \"d\", 3),(\"d\", \"g\", 2),\n (\"e\", \"i\", 7),(\"f\", \"h\", 3),\n (\"f\", \"g\", 2),(\"h\", \"i\", 1)])\n\n# Add an edge to the graph\ngraph.add_edge(\"i\", \"j\", 4)\n\nIf you want to add multiple edges to your graph using a for-loop, you can use the add_edge method inside the for-loop to add each edge. Here is an example of how you might do this:\n# Create a list of edges to add to the graph\nedges = [(\"i\", \"j\", 4), (\"j\", \"k\", 5), (\"k\", \"l\", 6)]\n\n# Create a graph\ngraph = Graph([\n (\"a\", \"b\", 2),(\"a\", \"c\", 5),\n (\"a\", \"d\", 2),(\"b\", \"c\", 3),\n (\"b\", \"e\", 1),(\"c\", \"e\", 1),\n (\"c\", \"h\", 1),(\"c\", \"f\", 1),\n (\"c\", \"d\", 3),(\"d\", \"g\", 2),\n (\"e\", \"i\", 7),(\"f\", \"h\", 3),\n (\"f\", \"g\", 2),(\"h\", \"i\", 1)])\n\n# Iterate over the edges in the list\nfor source, destination, weight in edges:\n # Add the edge to the graph\n graph.add_edge(source, destination, weight)\n\n\nThis should add the edges in the edges list to your graph.\n" ]
[ 0, 0 ]
[]
[]
[ "algorithm", "dijkstra", "graph_theory", "python", "search" ]
stackoverflow_0074674611_algorithm_dijkstra_graph_theory_python_search.txt
Q: Docker SQL-Server login problem: AuthenticationException: The remote certificate was rejected by the provided RemoteCertificateValidationCallback I'm working on a Docker related application, written in C#, based on Entity Framework. One of the Docker containers, is the ms-sql-server, which is used for database access. In order to access the SQL-server, following connectionString is created: Server=ms-sql-server;Initial Catalog=Ownobjects;User ID=SA;Password=some_password While performing following source code: public void InitialiseDatabase(IApplicationBuilder app) { using (var context = app.ApplicationServices.GetRequiredService<ApplicationDbContext>()) { context.Database.EnsureCreated(); ... ... I get following Exception: Microsoft.Data.SqlClient.SqlException: 'A connection was successfully established with the server, but then an error occurred during the pre-login handshake. (provider: TCP Provider, error: 35 - An internal exception was caught)' Inner Exception AuthenticationException: The remote certificate was rejected by the provided RemoteCertificateValidationCallback. According to this similar StackOverflow post, two things might not match: the instance name in the connection string and the expected name from the instance. Is this true? In that case, where or how can I find both names? If this is not true, then what might be causing this issue? Edit: Some further examination: In the Logs of the ms-sql-server Docker container, I've found following line: Server name is 'e614890825ac'. This is an informational message only. No user action is required I've used this entry in the connectionString but then the same Exception is raised, but this time with following Inner Exception:: ExtendedSocketException: Resource temporarily unavailable Edit2: Again some further examination: In the meantime I've discovered that my original servername in the connectionString is correct. 
Thanks in advance A: I've spent some time solving this message SQL Server Pre-Login Handshake Solution 1 (Encryption in client container): Dockerfile client (github issue) FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base # ... # SQL Server trusted connection problem RUN sed -i 's/CipherString = DEFAULT@SECLEVEL=2/CipherString = DEFAULT@SECLEVEL=1/g' /etc/ssl/openssl.cnf # ... Note: There's no need to change your connection string adding Encrypt=false or anything else with this setting Note 2: I'have tried to use focal and other linux images and only aspnet:6.0 works with this setting. Solution 2 (Image version or storage problems): Change the runtime version of the client container (see github comments) Replace the binding storage or volume in sql server container. This could be a user access problem. Note: There are some comments about this github issue
Docker SQL-Server login problem: AuthenticationException: The remote certificate was rejected by the provided RemoteCertificateValidationCallback
I'm working on a Docker related application, written in C#, based on Entity Framework. One of the Docker containers, is the ms-sql-server, which is used for database access. In order to access the SQL-server, following connectionString is created: Server=ms-sql-server;Initial Catalog=Ownobjects;User ID=SA;Password=some_password While performing following source code: public void InitialiseDatabase(IApplicationBuilder app) { using (var context = app.ApplicationServices.GetRequiredService<ApplicationDbContext>()) { context.Database.EnsureCreated(); ... ... I get following Exception: Microsoft.Data.SqlClient.SqlException: 'A connection was successfully established with the server, but then an error occurred during the pre-login handshake. (provider: TCP Provider, error: 35 - An internal exception was caught)' Inner Exception AuthenticationException: The remote certificate was rejected by the provided RemoteCertificateValidationCallback. According to this similar StackOverflow post, two things might not match: the instance name in the connection string and the expected name from the instance. Is this true? In that case, where or how can I find both names? If this is not true, then what might be causing this issue? Edit: Some further examination: In the Logs of the ms-sql-server Docker container, I've found following line: Server name is 'e614890825ac'. This is an informational message only. No user action is required I've used this entry in the connectionString but then the same Exception is raised, but this time with following Inner Exception:: ExtendedSocketException: Resource temporarily unavailable Edit2: Again some further examination: In the meantime I've discovered that my original servername in the connectionString is correct. Thanks in advance
[ "I've spent some time solving this message SQL Server Pre-Login Handshake\nSolution 1 (Encryption in client container):\nDockerfile client (github issue)\nFROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base\n# ...\n\n# SQL Server trusted connection problem\nRUN sed -i 's/CipherString = DEFAULT@SECLEVEL=2/CipherString = DEFAULT@SECLEVEL=1/g' /etc/ssl/openssl.cnf\n\n# ...\n\nNote: There's no need to change your connection string adding Encrypt=false or anything else with this setting\nNote 2: I'have tried to use focal and other linux images and only aspnet:6.0 works with this setting.\nSolution 2 (Image version or storage problems):\n\nChange the runtime version of the client container (see github comments)\nReplace the binding storage or volume in sql server container. This could be a user access problem.\n\nNote: There are some comments about this github issue\n" ]
[ 0 ]
[]
[]
[ "c#", "connection_string", "docker", "entity_framework", "sql_server" ]
stackoverflow_0074601743_c#_connection_string_docker_entity_framework_sql_server.txt
Q: Index Signatures With predetermine type in TypeScript I have the following code: My goal is that the attribute1 will be number[], attribute2 will be string[] and attribute3 will be number[]. How i can achieve that with typescript in order to have compilation error if i try to execute the following line obj.attributes.attribute2 = [1, 2, 3]; type MyFields = "attribute1" | "attribute2" | "attribute3"; type MyTypes = number[] | string[] | number[]; interface DynamicType { attributes: { [attribute in MyFields]: MyTypes; }; } const obj: DynamicType = { attributes: { attribute1: [], attribute2: [], attribute3: [], }, }; obj.attributes.attribute1 = [1, 2, 3]; obj.attributes.attribute2 = [1, 2, 3]; // i need to have a compilation error here! obj.attributes.attribute1 = [1, 2, 3]; A: use type assertions to specify the type of each attribute in the attributes object. const obj: DynamicType = { attributes: { attribute1: [] as number[], attribute2: [] as string[], attribute3: [] as number[], }, }; obj.attributes.attribute1 = [1, 2, 3]; obj.attributes.attribute2 = ["a", "b", "c"]; obj.attributes.attribute3 = [4, 5, 6]; The TypeScript compiler will give you an error because the attribute2 attribute is defined as a string[] and you are trying to assign a value of type number[] to it.
Index Signatures With predetermine type in TypeScript
I have the following code: My goal is that the attribute1 will be number[], attribute2 will be string[] and attribute3 will be number[]. How i can achieve that with typescript in order to have compilation error if i try to execute the following line obj.attributes.attribute2 = [1, 2, 3]; type MyFields = "attribute1" | "attribute2" | "attribute3"; type MyTypes = number[] | string[] | number[]; interface DynamicType { attributes: { [attribute in MyFields]: MyTypes; }; } const obj: DynamicType = { attributes: { attribute1: [], attribute2: [], attribute3: [], }, }; obj.attributes.attribute1 = [1, 2, 3]; obj.attributes.attribute2 = [1, 2, 3]; // i need to have a compilation error here! obj.attributes.attribute1 = [1, 2, 3];
[ "use type assertions to specify the type of each attribute in the attributes object.\nconst obj: DynamicType = {\n attributes: {\n attribute1: [] as number[],\n attribute2: [] as string[],\n attribute3: [] as number[],\n },\n};\n\nobj.attributes.attribute1 = [1, 2, 3];\nobj.attributes.attribute2 = [\"a\", \"b\", \"c\"];\nobj.attributes.attribute3 = [4, 5, 6];\n\nThe TypeScript compiler will give you an error because the attribute2 attribute is defined as a string[] and you are trying to assign a value of type number[] to it.\n" ]
[ 0 ]
[]
[]
[ "javascript", "typescript" ]
stackoverflow_0074674616_javascript_typescript.txt
Q: How to switch git branches in vscode? I have GitHub repository with multiple branches, and I want to commit and push changes to a specific branch, how can I switch from "master*" branch to another branch? I tried to switch by clicking the branch name in the left bottom side of the screen and it didn't switch. A: Access the "Source Control" tab on the left side of VSCode Click on the "three small dots" next to the refresh button Click on the "Checkout to..." option Choose the branch you want to switch to You can also try this: you do git branch branch_name then git checkout branch_name A: // it will create new branch and switch to that branch git checkout -b branch-name // it will directly switch to previously created branch git checkout branch-name A: for switching between the branches. If you have already more than one branch. git switch <branch-name> //name of the branch where you want to switch or, git checkout <branch-name> for creating the branch you can use git branch <branch-name> //Name of the branch you want to create and for creating and switching to that branch simuntaneously. git switch -c <branch-name>
How to switch git branches in vscode?
I have GitHub repository with multiple branches, and I want to commit and push changes to a specific branch, how can I switch from "master*" branch to another branch? I tried to switch by clicking the branch name in the left bottom side of the screen and it didn't switch.
[ "\nAccess the \"Source Control\" tab on the left side of VSCode\nClick on the \"three small dots\" next to the refresh button\nClick on the \"Checkout to...\" option\nChoose the branch you want to switch to\n\nYou can also try this:\nyou do git branch branch_name then git checkout branch_name\n", "// it will create new branch and switch to that branch\ngit checkout -b branch-name\n\n// it will directly switch to previously created branch\ngit checkout branch-name\n\n", "for switching between the branches. If you have already more than one branch.\ngit switch <branch-name> //name of the branch where you want to switch \n\nor,\ngit checkout <branch-name>\n\nfor creating the branch you can use\ngit branch <branch-name> //Name of the branch you want to create\n\nand for creating and switching to that branch simuntaneously.\ngit switch -c <branch-name>\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "git_branch", "visual_studio_code" ]
stackoverflow_0074632926_git_branch_visual_studio_code.txt
Q: Can I configure a Azure's key vault secret per service principal? I'm used to use GCP's secret manager. There, we can create a secret and give a specific READ permission for one specific service account. I mean, let's say we create a secret ABC and a service account "getsecretaccount", I can give the read permission for this SA called getsecretaccount to access the ABC secret. This getsecretaccount will not have access to any other secret there. Can I achieve this scenario in Azure Key Vault? Thx!! A: if you have 1 azure vault with multiple secrets and you create an access policy for a user and select permission like "get" and/or "list", that user will be able to see all secrets under that vault. no option for separated permission per individual secret A: Authorization Key vault offer two different Authorizations: older Vault access policy and newer Azure role-based access control Vault access policy BrunoLucasAzure already explained how this works. Personally I don't recommend using vault access policy unless you have some specific reason to use it: resource re-deployment will reset existing authorization defined in key vault properties. ref: https://learn.microsoft.com/en-us/azure/templates/microsoft.keyvault/vaults?pivots=deployment-language-bicep#resource-format passing [] to accessPolicies will clear access policy list passing null or not using accessPolicies in template will generate error of course you can read value of accessPoliciesbefore re-deployment and then re-use existing value when re-deploying resource. TBH that kind of hacks should not be required by resource. For more information: https://docs.microsoft.com/azure/key-vault/general/assign-access-policy-portal Azure role-based access control Azure role-based access control based key vault authorization uses Azure RBAC assignments for key vault data layer access control. 
There is multiple ready-made RBAC roles for key vault data layer: Key Vault Administrator Key Vault Secrets Officer Key Vault Secrets User Key Vault Reader etc Documentation only mentions thins Azure RBAC for key vault also provides the ability to have separate permissions on individual keys, secrets, and certificates => you can add RBAC roles into individual key/secret/certificate Note that key/secret/certificate must be created before you can add RBAC to it. For more information: https://learn.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli The answer You can use both methods: if you use older vault access policy, you need to create one key vault per service principal if you use RBAC based access control, you can manage access control on individual secret level
Can I configure a Azure's key vault secret per service principal?
I'm used to use GCP's secret manager. There, we can create a secret and give a specific READ permission for one specific service account. I mean, let's say we create a secret ABC and a service account "getsecretaccount", I can give the read permission for this SA called getsecretaccount to access the ABC secret. This getsecretaccount will not have access to any other secret there. Can I achieve this scenario in Azure Key Vault? Thx!!
[ "if you have 1 azure vault with multiple secrets and you create an access policy for a user and select permission like \"get\" and/or \"list\", that user will be able to see all secrets under that vault. no option for separated permission per individual secret\n\n\n", "Authorization\nKey vault offer two different Authorizations: older Vault access policy and newer Azure role-based access control\n\nVault access policy\nBrunoLucasAzure already explained how this works. Personally I don't recommend using vault access policy unless you have some specific reason to use it:\n\nresource re-deployment will reset existing authorization defined in key vault properties. ref: https://learn.microsoft.com/en-us/azure/templates/microsoft.keyvault/vaults?pivots=deployment-language-bicep#resource-format\n\npassing [] to accessPolicies will clear access policy list\npassing null or not using accessPolicies in template will generate error\nof course you can read value of accessPoliciesbefore re-deployment and then re-use existing value when re-deploying resource. 
TBH that kind of hacks should not be required by resource.\n\n\n\nFor more information: https://docs.microsoft.com/azure/key-vault/general/assign-access-policy-portal\nAzure role-based access control\nAzure role-based access control based key vault authorization uses Azure RBAC assignments for key vault data layer access control.\nThere is multiple ready-made RBAC roles for key vault data layer:\n\nKey Vault Administrator\nKey Vault Secrets Officer\nKey Vault Secrets User\nKey Vault Reader\netc\n\nDocumentation only mentions thins Azure RBAC for key vault also provides the ability to have separate permissions on individual keys, secrets, and certificates\n=> you can add RBAC roles into individual key/secret/certificate\nNote that key/secret/certificate must be created before you can add RBAC to it.\n\nFor more information: https://learn.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli\nThe answer\nYou can use both methods:\n\nif you use older vault access policy, you need to create one key vault per service principal\nif you use RBAC based access control, you can manage access control on individual secret level\n\n" ]
[ 0, 0 ]
[]
[]
[ "azure", "azure_keyvault" ]
stackoverflow_0074659858_azure_azure_keyvault.txt
Q: What's the fastest way to loop through an array in JavaScript? I learned from books that you should write for loop like this: for(var i=0, len=arr.length; i < len; i++){ // blah blah } so the arr.length will not be calculated each time. Others say that the compiler will do some optimization to this, so you can just write: for(var i=0; i < arr.length; i++){ // blah blah } I just want to know which is the best way in practice? A: After performing this test with most modern browsers: https://jsben.ch/wY5fo Currently, the fastest form of loop (and in my opinion the most syntactically obvious). A standard for-loop with length caching var i = 0, len = myArray.length; while (i < len) { // your code i++ } I would say, this is definitely a case where I applaud JavaScript engine developers. A runtime should be optimized for clarity, not cleverness. A: The absolute fastest way to loop through a javascript array is: var len = arr.length; while (len--) { // blah blah } See this post for a full comparison A: As of June 2016, doing some tests in latest Chrome (71% of the browser market in May 2016, and increasing): The fastest loop is a for loop, both with and without caching length delivering really similar performance. (The for loop with cached length sometimes delivered better results than the one without caching, but the difference is almost negligible, which means the engine might be already optimized to favor the standard and probably most straightforward for loop without caching). The while loop with decrements was approximately 1.5 times slower than the for loop. A loop using a callback function (like the standard forEach), was approximately 10 times slower than the for loop. I believe this thread is too old and it is misleading programmers to think they need to cache length, or use reverse traversing whiles with decrements to achieve better performance, writing code that is less legible and more prone to errors than a simple straightforward for loop. 
Therefore, I recommend: If your app iterates over a lot of items or your loop code is inside a function that is used often, a straightforward for loop is the answer: for (var i = 0; i < arr.length; i++) { // Do stuff with arr[i] or i } If your app doesn't really iterate through lots of items or you just need to do small iterations here and there, using the standard forEach callback or any similar function from your JS library of choice might be more understandable and less prone to errors, since index variable scope is closed and you don't need to use brackets, accessing the array value directly: arr.forEach(function(value, index) { // Do stuff with value or index }); If you really need to scratch a few milliseconds while iterating over billions of rows and the length of your array doesn't change through the process, you might consider caching the length in your for loop. Although I think this is really not necessary nowadays: for (var i = 0, len = arr.length; i < len; i++) { // Do stuff with arr[i] } A: It's just 2018 so an update could be nice... And I really have to disagree with the accepted answer. It defers on different browsers. some do forEach faster, some for-loop, and some while here is a benchmark on all method http://jsben.ch/mW36e arr.forEach( a => { // ... } and since you can see alot of for-loop like for(a = 0; ... ) then worth to mention that without 'var' variables will be define globally and this can dramatically affects on speed so it'll get slow. 
Duff's device run faster on opera but not in firefox var arr = arr = new Array(11111111).fill(255); var benches = [ [ "empty", () => { for(var a = 0, l = arr.length; a < l; a++); }] , ["for-loop", () => { for(var a = 0, l = arr.length; a < l; ++a) var b = arr[a] + 1; }] , ["for-loop++", () => { for(var a = 0, l = arr.length; a < l; a++) var b = arr[a] + 1; }] , ["for-loop - arr.length", () => { for(var a = 0; a < arr.length; ++a ) var b = arr[a] + 1; }] , ["reverse for-loop", () => { for(var a = arr.length - 1; a >= 0; --a ) var b = arr[a] + 1; }] ,["while-loop", () => { var a = 0, l = arr.length; while( a < l ) { var b = arr[a] + 1; ++a; } }] , ["reverse-do-while-loop", () => { var a = arr.length - 1; // CAREFUL do { var b = arr[a] + 1; } while(a--); }] , ["forEach", () => { arr.forEach( a => { var b = a + 1; }); }] , ["for const..in (only 3.3%)", () => { var ar = arr.slice(0,arr.length/33); for( const a in ar ) { var b = a + 1; } }] , ["for let..in (only 3.3%)", () => { var ar = arr.slice(0,arr.length/33); for( let a in ar ) { var b = a + 1; } }] , ["for var..in (only 3.3%)", () => { var ar = arr.slice(0,arr.length/33); for( var a in ar ) { var b = a + 1; } }] , ["Duff's device", () => { var len = arr.length; var i, n = len % 8 - 1; if (n > 0) { do { var b = arr[len-n] + 1; } while (--n); // n must be greater than 0 here } n = (len * 0.125) ^ 0; if (n > 0) { do { i = --n <<3; var b = arr[i] + 1; var c = arr[i+1] + 1; var d = arr[i+2] + 1; var e = arr[i+3] + 1; var f = arr[i+4] + 1; var g = arr[i+5] + 1; var h = arr[i+6] + 1; var k = arr[i+7] + 1; } while (n); // n must be greater than 0 here also } }]]; function bench(title, f) { var t0 = performance.now(); var res = f(); return performance.now() - t0; // console.log(`${title} took ${t1-t0} msec`); } var globalVarTime = bench( "for-loop without 'var'", () => { // Here if you forget to put 'var' so variables'll be global for(a = 0, l = arr.length; a < l; ++a) var b = arr[a] + 1; }); var times = benches.map( 
function(a) { arr = new Array(11111111).fill(255); return [a[0], bench(...a)] }).sort( (a,b) => a[1]-b[1] ); var max = times[times.length-1][1]; times = times.map( a => {a[2] = (a[1]/max)*100; return a; } ); var template = (title, time, n) => `<div>` + `<span>${title} &nbsp;</span>` + `<span style="width:${3+n/2}%">&nbsp;${Number(time.toFixed(3))}msec</span>` + `</div>`; var strRes = times.map( t => template(...t) ).join("\n") + `<br><br>for-loop without 'var' ${globalVarTime} msec.`; var $container = document.getElementById("container"); $container.innerHTML = strRes; body { color:#fff; background:#333; font-family:helvetica; } body > div > div { clear:both } body > div > div > span { float:left; width:43%; margin:3px 0; text-align:right; } body > div > div > span:nth-child(2) { text-align:left; background:darkorange; animation:showup .37s .111s; -webkit-animation:showup .37s .111s; } @keyframes showup { from { width:0; } } @-webkit-keyframes showup { from { width:0; } } <div id="container"> </div> A: If the order is not important, I prefer this style: for(var i = array.length; i--; ) It caches the length and is much shorter to write. But it will iterate over the array in reverse order. A: 2014 While is back Just think logical. Look at this for( var index = 0 , length = array.length ; index < length ; index++ ) { //do stuff } Need to create at least 2 variables (index,length) Need to check if the index is smaller than the length Need to increase the index the for loop has 3 parameters Now tell me why this should be faster than: var length = array.length; while( --length ) { //or length-- //do stuff } One variable No checks the index is decreased (Machines prefer that) while has only one parameter I was totally confused when Chrome 28 showed that the for loop is faster than the while. This must have ben some sort of "Uh, everyone is using the for loop, let's focus on that when developing for chrome." But now, in 2014 the while loop is back on chrome. 
it's 2 times faster , on other/older browsers it was always faster. Lately i made some new tests. Now in real world envoirement those short codes are worth nothing and jsperf can't actually execute properly the while loop, because it needs to recreate the array.length which also takes time. you CAN'T get the actual speed of a while loop on jsperf. you need to create your own custom function and check that with window.performance.now() And yeah... there is no way the while loop is simply faster. The real problem is actually the dom manipulation / rendering time / drawing time or however you wanna call it. For example i have a canvas scene where i need to calculate the coordinates and collisions... this is done between 10-200 MicroSeconds (not milliseconds). it actually takes various milliseconds to render everything.Same as in DOM. BUT There is another super performant way using the for loop in some cases... for example to copy/clone an array for( var i = array.length ; i > 0 ; arrayCopy[ --i ] = array[ i ] // doing stuff ); Notice the setup of the parameters: Same as in the while loop i'm using only one variable Need to check if the index is bigger than 0; As you can see this approach is different vs the normal for loop everyone uses, as i do stuff inside the 3th parameter and i also decrease directly inside the array. Said that, this confirms that machines like the -- writing that i was thinking to make it a little shorter and remove some useless stuff and wrote this one using the same style: for( var i = array.length ; i-- ; arrayCopy[ i ] = array[ i ] // doing stuff ); Even if it's shorter it looks like using i one more time slows down everything. It's 1/5 slower than the previous for loop and the while one. Note: the ; is very important after the for looo without {} Even if i just told you that jsperf is not the best way to test scripts .. 
i added this 2 loops here http://jsperf.com/caching-array-length/40 And here is another answer about performance in javascript https://stackoverflow.com/a/21353032/2450730 This answer is to show performant ways of writing javascript. So if you can't read that, ask and you will get an answer or read a book about javascript http://www.ecma-international.org/ecma-262/5.1/ A: http://jsperf.com/caching-array-length/60 The latest revision of test, which I prepared (by reusing older one), shows one thing. Caching length is not that much important, but it does not harm. Every first run of the test linked above (on freshly opened tab) gives best results for the last 4 snippets (3rd, 5th, 7th and 10th in charts) in Chrome, Opera and Firefox in my Debian Squeeze 64-bit (my desktop hardware). Subsequent runs give quite different result. Performance-wise conclusions are simple: Go with for loop (forward) and test using !== instead of <. If you don't have to reuse the array later, then while loop on decremented length and destructive shift()-ing array is also efficient. tl;dr Nowadays (2011.10) below pattern looks to be the fastest one. for (var i = 0, len = arr.length; i !== len; i++) { ... } Mind that caching arr.length is not crucial here, so you can just test for i !== arr.length and performance won't drop, but you'll get shorter code. PS: I know that in snippet with shift() its result could be used instead of accessing 0th element, but I somehow overlooked that after reusing previous revision (which had wrong while loops), and later I didn't want to lose already obtained results. A: "Best" as in pure performance? or performance AND readability? Pure performance "best" is this, which uses a cache and the ++prefix operator (my data: http://jsperf.com/caching-array-length/189) for (var i = 0, len = myArray.length; i < len; ++i) { // blah blah } I would argue that the cache-less for-loop is the best balance in execution time and programmer reading time. 
Every programmer that started with C/C++/Java won't waste a ms having to read through this one for(var i=0; i < arr.length; i++){ // blah blah } A: **cache the array length inside the loop ,some seconds of time will be eluded . Depends on the items in the array if there are more items in array there is major difference with respect to Ms of time* ** sArr; //Array[158]; for(var i = 0 ; i <sArr.length ; i++) { callArray(sArr[i]); //function call } ***end: 6.875ms*** ** ** sArr; //Array[158]; for(var i = 0,len = sArr.length ; i < len ; i++) { callArray(sArr[i]); //function call } ***end: 1.354ms*** ** A: This looks to be the fastest way by far... var el; while (el = arr.shift()) { el *= 2; } Take into account that this will consume the array, eating it, and leaving nothing left... A: It's the year 2017. I made some tests. https://jsperf.com/fastest-way-to-iterate-through-an-array/ Looks like the while method is the fastest on Chrome. Looks like the left decrement (--i) is much faster than the others (++i, i--, i++) on Firefox. This approach is the fasted on average. But it iterates the array in reversed order. let i = array.length; while (--i >= 0) { doSomething(array[i]); } If the forward order is important, use this approach. let ii = array.length; let i = 0; while (i < ii) { doSomething(array[i]); ++i; } A: I have a test all way in here. Check this! https://gist.github.com/DungGramer/7efdfefecaa1b8f5d6510202524dc751 My result: pop is fastest, forEach is goodest for readable and fast do..while faster than for loop faster if no have condition (like if) --i is fastest A: I'm always write in the first style. Even if a compiler is smart enough to optimize it for arrays, but still it smart if we are using DOMNodeList here or some complicated object with calculated length? I know what the question is about arrays, but i think it is a good practice to write all your loops in one style. 
A: var arr = []; // The array var i = 0; while (i < arr.length) { // Do something with arr[i] i++; } i++ is faster than ++i, --i and i-- Also, you can save the last line doing arr[i++] the last time you need to access i (but this can be hard to debug). You can test it here (with other loop tests): http://jsperf.com/for-vs-whilepop/5 A: As of September 2017 these jsperf tests are showing the following pattern to be most performant on Chrome 60: function foo(x) { x; }; arr.forEach(foo); Is anyone able to reproduce? A: Fastest approach is the traditional for loop. Here is a more comprehensive performance comparison. https://gists.cwidanage.com/2019/11/how-to-iterate-over-javascript-arrays.html A: If you want a faster for loop, define your variables outside the loop and use below syntax const iMax = lengthOftheLoop; var i = 0; for (; i < iMax; i++) { console.log("loop"+i); } reference: https://medium.com/kbdev/voyage-to-the-most-efficient-loop-in-nodejs-and-a-bit-js-5961d4524c2e A: although it is a very old question, it is a very interesting one, pardon me for tweaking the question a little bit but I am going to answer it at the end. the question made me ask myself if there are any better methods for looping in js: so I have made some tests and here is what I found out: for 1000_000 record: the best is forEach. for 100 records: it simply doesn't matter. to go back to your question: the example i created is not exactly like the question .. but i found out some interesting things: firstly : like what you said , the arr.length will evaluate every time if it is within the comparison statement i < arr.length ... note : the variable of arrLength below is not more than the a number of 1000_000 records.. for instance : this wont work but this will and it will take .036 seconds .. which is very large compared to what it takes if the number was constant... 
to sum up, it is better to use FOREACH in your case: the i<arr.length should take more time ( around 1.3 the usual) see the tests : see the tests A: I have tried some other ways to iterate a huge array and found out that halving the array length and then iterating both halves in a single loop is faster. This performance difference can be seen while processing huge arrays. var firstHalfLen =0; var secondHalfLen = 0; var count2=0; var searchterm = "face"; var halfLen = arrayLength/2; if(arrayLength%2==halfLen) { firstHalfLen = Math.ceil(halfLen); secondHalfLen=Math.floor(halfLen); } else { firstHalfLen=halfLen; secondHalfLen=halfLen; } for(var firstHalfCOunter=0,secondHalfCounter = arrayLength-secondHalfLen; firstHalfCOunter < firstHalfLen; firstHalfCOunter++) { if(mainArray[firstHalfCOunter].search(new RegExp(searchterm, "i"))> -1) { count2+=1; } if(secondHalfCounter < arrayLength) { if(mainArray[secondHalfCounter].search(new RegExp(searchterm, "i"))> -1) { count2+=1; } secondHalfCounter++; } } Some performance comparison (using timer.js) between the cached length for-loop VS the above method. http://jsfiddle.net/tejzpr/bbLgzxgo/ A: Another jsperf.com test: http://jsperf.com/while-reverse-vs-for-cached-length The reverse while loop seems to be the fastest. Only problem is that while (--i) will stop at 0. How can I access array[0] in my loop then? A: A basic while loop is often the fastest. jsperf.com is a great sandbox to test these types of concepts. https://jsperf.com/fastest-array-loops-in-javascript/24 A: Benchmarking [10000000] element array... 
The fastest [for ++] took [76762166ns] ┌─────────┬───────────────────────┬────────────┬──────────┐ │ (index) │ type │ time[ns] │ baseline │ ├─────────┼───────────────────────┼────────────┼──────────┤ │ 0 │ 'for ++' │ 76762166 │ 1 │ │ 1 │ 'for of' │ 82407583 │ 1.07 │ │ 2 │ '--while forward' │ 83723083 │ 1.09 │ │ 3 │ 'do while forward --' │ 83942958 │ 1.09 │ │ 4 │ '--do while forward' │ 84225584 │ 1.1 │ │ 5 │ 'while forward --' │ 85156957 │ 1.11 │ │ 6 │ '--while >= 0' │ 89745916 │ 1.17 │ │ 7 │ '++ do while' │ 90306542 │ 1.18 │ │ 8 │ 'for !== ++' │ 90319083 │ 1.18 │ │ 9 │ '-- for' │ 90360167 │ 1.18 │ │ 10 │ 'for i length --' │ 90558833 │ 1.18 │ │ 11 │ '++ for' │ 90616125 │ 1.18 │ │ 12 │ 'do while ++' │ 90657541 │ 1.18 │ │ 13 │ '--for i length' │ 90757708 │ 1.18 │ │ 14 │ 'for --' │ 90799875 │ 1.18 │ │ 15 │ '++ while' │ 92697417 │ 1.21 │ │ 16 │ '++ for !==' │ 94488209 │ 1.23 │ │ 17 │ 'pop' │ 108399917 │ 1.41 │ │ 18 │ 'while ++' │ 109276500 │ 1.42 │ │ 19 │ 'forEach call' │ 147140124 │ 1.92 │ │ 20 │ 'forEach' │ 148886207 │ 1.94 │ │ 21 │ 'map' │ 207100583 │ 2.7 │ │ 22 │ 'Array from' │ 353166207 │ 4.6 │ │ 23 │ 'flatMap' │ 1213152082 │ 15.8 │ │ 24 │ 'Object.keys map' │ 1294475333 │ 16.86 │ │ 25 │ 'for in' │ 1338988749 │ 17.44 │ └─────────┴───────────────────────┴────────────┴──────────┘ Tested on Macbook Air M1 2020. NodeJS 18. For arrays with 10_000_000 elements, the standard for loop wins. For other cases see my gist: https://gist.github.com/episage/076ded007d0583f6a275f93a8c9c8047#file-result-txt Shoutout to @DungGramer. I fixed bugs and enhanced his benchmark.
What's the fastest way to loop through an array in JavaScript?
I learned from books that you should write for loop like this: for(var i=0, len=arr.length; i < len; i++){ // blah blah } so the arr.length will not be calculated each time. Others say that the compiler will do some optimization to this, so you can just write: for(var i=0; i < arr.length; i++){ // blah blah } I just want to know which is the best way in practice?
[ "After performing this test with most modern browsers:\nhttps://jsben.ch/wY5fo\nCurrently, the fastest form of loop (and in my opinion the most syntactically obvious).\nA standard for-loop with length caching\n var i = 0, len = myArray.length;\n while (i < len) {\n // your code\n i++\n }\n\nI would say, this is definitely a case where I applaud JavaScript engine developers. A runtime should be optimized for clarity, not cleverness.\n", "The absolute fastest way to loop through a javascript array is:\nvar len = arr.length;\nwhile (len--) {\n // blah blah\n}\n\nSee this post for a full comparison\n", "As of June 2016, doing some tests in latest Chrome (71% of the browser market in May 2016, and increasing):\n\nThe fastest loop is a for loop, both with and without caching length delivering really similar performance. (The for loop with cached length sometimes delivered better results than the one without caching, but the difference is almost negligible, which means the engine might be already optimized to favor the standard and probably most straightforward for loop without caching).\nThe while loop with decrements was approximately 1.5 times slower than the for loop.\nA loop using a callback function (like the standard forEach), was approximately 10 times slower than the for loop.\n\nI believe this thread is too old and it is misleading programmers to think they need to cache length, or use reverse traversing whiles with decrements to achieve better performance, writing code that is less legible and more prone to errors than a simple straightforward for loop. 
Therefore, I recommend:\n\nIf your app iterates over a lot of items or your loop code is inside a function that is used often, a straightforward for loop is the answer:\nfor (var i = 0; i < arr.length; i++) {\n // Do stuff with arr[i] or i\n}\n\nIf your app doesn't really iterate through lots of items or you just need to do small iterations here and there, using the standard forEach callback or any similar function from your JS library of choice might be more understandable and less prone to errors, since index variable scope is closed and you don't need to use brackets, accessing the array value directly:\narr.forEach(function(value, index) {\n // Do stuff with value or index\n});\n\nIf you really need to scratch a few milliseconds while iterating over billions of rows and the length of your array doesn't change through the process, you might consider caching the length in your for loop. Although I think this is really not necessary nowadays:\nfor (var i = 0, len = arr.length; i < len; i++) {\n // Do stuff with arr[i]\n}\n\n\n", "It's just 2018 so an update could be nice...\nAnd I really have to disagree with the accepted answer.\nIt defers on different browsers. some do forEach faster, some for-loop, and some while\nhere is a benchmark on all method http://jsben.ch/mW36e\narr.forEach( a => {\n // ...\n}\n\nand since you can see alot of for-loop like for(a = 0; ... 
) then worth to mention that without 'var' variables will be define globally and this can dramatically affects on speed so it'll get slow.\nDuff's device run faster on opera but not in firefox\n\n\nvar arr = arr = new Array(11111111).fill(255);\r\nvar benches = \r\n[ [ \"empty\", () => {\r\n for(var a = 0, l = arr.length; a < l; a++);\r\n}]\r\n, [\"for-loop\", () => {\r\n for(var a = 0, l = arr.length; a < l; ++a)\r\n var b = arr[a] + 1;\r\n}]\r\n, [\"for-loop++\", () => {\r\n for(var a = 0, l = arr.length; a < l; a++)\r\n var b = arr[a] + 1;\r\n}]\r\n, [\"for-loop - arr.length\", () => {\r\n for(var a = 0; a < arr.length; ++a )\r\n var b = arr[a] + 1;\r\n}]\r\n, [\"reverse for-loop\", () => {\r\n for(var a = arr.length - 1; a >= 0; --a )\r\n var b = arr[a] + 1;\r\n}]\r\n,[\"while-loop\", () => {\r\n var a = 0, l = arr.length;\r\n while( a < l ) {\r\n var b = arr[a] + 1;\r\n ++a;\r\n }\r\n}]\r\n, [\"reverse-do-while-loop\", () => {\r\n var a = arr.length - 1; // CAREFUL\r\n do {\r\n var b = arr[a] + 1;\r\n } while(a--); \r\n}]\r\n, [\"forEach\", () => {\r\n arr.forEach( a => {\r\n var b = a + 1;\r\n });\r\n}]\r\n, [\"for const..in (only 3.3%)\", () => {\r\n var ar = arr.slice(0,arr.length/33);\r\n for( const a in ar ) {\r\n var b = a + 1;\r\n }\r\n}]\r\n, [\"for let..in (only 3.3%)\", () => {\r\n var ar = arr.slice(0,arr.length/33);\r\n for( let a in ar ) {\r\n var b = a + 1;\r\n }\r\n}]\r\n, [\"for var..in (only 3.3%)\", () => {\r\n var ar = arr.slice(0,arr.length/33);\r\n for( var a in ar ) {\r\n var b = a + 1;\r\n }\r\n}]\r\n, [\"Duff's device\", () => {\r\n var len = arr.length;\r\n var i, n = len % 8 - 1;\r\n\r\n if (n > 0) {\r\n do {\r\n var b = arr[len-n] + 1;\r\n } while (--n); // n must be greater than 0 here\r\n }\r\n n = (len * 0.125) ^ 0;\r\n if (n > 0) { \r\n do {\r\n i = --n <<3;\r\n var b = arr[i] + 1;\r\n var c = arr[i+1] + 1;\r\n var d = arr[i+2] + 1;\r\n var e = arr[i+3] + 1;\r\n var f = arr[i+4] + 1;\r\n var g = arr[i+5] + 1;\r\n var h = arr[i+6] 
+ 1;\r\n var k = arr[i+7] + 1;\r\n }\r\n while (n); // n must be greater than 0 here also\r\n }\r\n}]];\r\nfunction bench(title, f) {\r\n var t0 = performance.now();\r\n var res = f();\r\n return performance.now() - t0; // console.log(`${title} took ${t1-t0} msec`);\r\n}\r\nvar globalVarTime = bench( \"for-loop without 'var'\", () => {\r\n // Here if you forget to put 'var' so variables'll be global\r\n for(a = 0, l = arr.length; a < l; ++a)\r\n var b = arr[a] + 1;\r\n});\r\nvar times = benches.map( function(a) {\r\n arr = new Array(11111111).fill(255);\r\n return [a[0], bench(...a)]\r\n }).sort( (a,b) => a[1]-b[1] );\r\nvar max = times[times.length-1][1];\r\ntimes = times.map( a => {a[2] = (a[1]/max)*100; return a; } );\r\nvar template = (title, time, n) =>\r\n `<div>` +\r\n `<span>${title} &nbsp;</span>` +\r\n `<span style=\"width:${3+n/2}%\">&nbsp;${Number(time.toFixed(3))}msec</span>` +\r\n `</div>`;\r\n\r\nvar strRes = times.map( t => template(...t) ).join(\"\\n\") + \r\n `<br><br>for-loop without 'var' ${globalVarTime} msec.`;\r\nvar $container = document.getElementById(\"container\");\r\n$container.innerHTML = strRes;\nbody { color:#fff; background:#333; font-family:helvetica; }\r\nbody > div > div { clear:both }\r\nbody > div > div > span {\r\n float:left;\r\n width:43%;\r\n margin:3px 0;\r\n text-align:right;\r\n}\r\nbody > div > div > span:nth-child(2) {\r\n text-align:left;\r\n background:darkorange;\r\n animation:showup .37s .111s;\r\n -webkit-animation:showup .37s .111s;\r\n}\r\n@keyframes showup { from { width:0; } }\r\n@-webkit-keyframes showup { from { width:0; } }\n<div id=\"container\"> </div>\n\n\n\n", "If the order is not important, I prefer this style:\nfor(var i = array.length; i--; )\n\nIt caches the length and is much shorter to write. 
But it will iterate over the array in reverse order.\n", "2014 While is back\nJust think logical.\nLook at this \nfor( var index = 0 , length = array.length ; index < length ; index++ ) {\n\n //do stuff\n\n}\n\n\nNeed to create at least 2 variables (index,length)\nNeed to check if the index is smaller than the length\nNeed to increase the index\nthe for loop has 3 parameters\n\nNow tell me why this should be faster than:\nvar length = array.length;\n\nwhile( --length ) { //or length--\n\n //do stuff\n\n}\n\n\nOne variable\nNo checks\nthe index is decreased (Machines prefer that)\nwhile has only one parameter\n\nI was totally confused when Chrome 28 showed that the for loop is faster than the while.\nThis must have ben some sort of \n\"Uh, everyone is using the for loop, let's focus on that when\n developing for chrome.\"\nBut now, in 2014 the while loop is back on chrome. it's 2 times faster , on other/older browsers it was always faster.\nLately i made some new tests. Now in real world envoirement those short codes are worth nothing and jsperf can't actually execute properly the while loop, because it needs to recreate the array.length which also takes time.\nyou CAN'T get the actual speed of a while loop on jsperf.\nyou need to create your own custom function and check that with window.performance.now()\nAnd yeah... there is no way the while loop is simply faster.\n\nThe real problem is actually the dom manipulation / rendering time /\n drawing time or however you wanna call it.\n\nFor example i have a canvas scene where i need to calculate the coordinates and collisions... this is done between 10-200 MicroSeconds (not milliseconds). it actually takes various milliseconds to render everything.Same as in DOM.\nBUT\nThere is another super performant way using the for loop in some cases... 
for example to copy/clone an array\nfor(\n var i = array.length ;\n i > 0 ;\n arrayCopy[ --i ] = array[ i ] // doing stuff\n);\n\nNotice the setup of the parameters:\n\nSame as in the while loop i'm using only one variable\nNeed to check if the index is bigger than 0;\nAs you can see this approach is different vs the normal for loop everyone uses, as i do stuff inside the 3th parameter and i also decrease directly inside the array.\n\nSaid that, this confirms that machines like the --\nwriting that i was thinking to make it a little shorter and remove some useless stuff and wrote this one using the same style:\nfor(\n var i = array.length ;\n i-- ;\n arrayCopy[ i ] = array[ i ] // doing stuff\n);\n\nEven if it's shorter it looks like using i one more time slows down everything.\nIt's 1/5 slower than the previous for loop and the while one.\nNote: the ; is very important after the for looo without {}\nEven if i just told you that jsperf is not the best way to test scripts .. i added this 2 loops here\nhttp://jsperf.com/caching-array-length/40\nAnd here is another answer about performance in javascript\nhttps://stackoverflow.com/a/21353032/2450730\nThis answer is to show performant ways of writing javascript. So if you can't read that, ask and you will get an answer or read a book about javascript http://www.ecma-international.org/ecma-262/5.1/\n", "http://jsperf.com/caching-array-length/60\nThe latest revision of test, which I prepared (by reusing older one), shows one thing.\nCaching length is not that much important, but it does not harm.\nEvery first run of the test linked above (on freshly opened tab) gives best results for the last 4 snippets (3rd, 5th, 7th and 10th in charts) in Chrome, Opera and Firefox in my Debian Squeeze 64-bit (my desktop hardware). 
Subsequent runs give quite different result.\nPerformance-wise conclusions are simple:\n\nGo with for loop (forward) and test using !== instead of <.\nIf you don't have to reuse the array later, then while loop on decremented length and destructive shift()-ing array is also efficient.\n\n\ntl;dr\nNowadays (2011.10) below pattern looks to be the fastest one.\nfor (var i = 0, len = arr.length; i !== len; i++) {\n ...\n}\n\nMind that caching arr.length is not crucial here, so you can just test for i !== arr.length and performance won't drop, but you'll get shorter code.\n\nPS: I know that in snippet with shift() its result could be used instead of accessing 0th element, but I somehow overlooked that after reusing previous revision (which had wrong while loops), and later I didn't want to lose already obtained results.\n", "\"Best\" as in pure performance? or performance AND readability?\nPure performance \"best\" is this, which uses a cache and the ++prefix operator (my data: http://jsperf.com/caching-array-length/189)\nfor (var i = 0, len = myArray.length; i < len; ++i) {\n // blah blah\n}\n\nI would argue that the cache-less for-loop is the best balance in execution time and programmer reading time. Every programmer that started with C/C++/Java won't waste a ms having to read through this one\nfor(var i=0; i < arr.length; i++){\n // blah blah\n}\n\n", "**cache the array length inside the loop ,some seconds of time will be eluded . 
Depends on the items in the array if there are more items in array there is major difference with respect to Ms of time* \n**\nsArr; //Array[158];\n\nfor(var i = 0 ; i <sArr.length ; i++) {\n callArray(sArr[i]); //function call\n}\n\n***end: 6.875ms***\n\n** \n**\nsArr; //Array[158];\nfor(var i = 0,len = sArr.length ; i < len ; i++) {\n callArray(sArr[i]); //function call\n}\n\n***end: 1.354ms***\n\n**\n", "This looks to be the fastest way by far...\nvar el;\nwhile (el = arr.shift()) {\n el *= 2;\n}\n\nTake into account that this will consume the array, eating it, and leaving nothing left...\n", "It's the year 2017.\nI made some tests.\nhttps://jsperf.com/fastest-way-to-iterate-through-an-array/\nLooks like the while method is the fastest on Chrome.\nLooks like the left decrement (--i) is much faster than the others (++i, i--, i++) on Firefox.\nThis approach is the fasted on average. But it iterates the array in reversed order.\nlet i = array.length;\nwhile (--i >= 0) {\n doSomething(array[i]);\n}\n\nIf the forward order is important, use this approach.\nlet ii = array.length;\nlet i = 0;\nwhile (i < ii) {\n doSomething(array[i]);\n ++i;\n}\n\n", "I have a test all way in here. Check this!\nhttps://gist.github.com/DungGramer/7efdfefecaa1b8f5d6510202524dc751\nMy result:\n\npop is fastest, forEach is goodest for readable and fast\ndo..while faster than for\nloop faster if no have condition (like if)\n--i is fastest\n\n", "I'm always write in the first style. 
\nEven if a compiler is smart enough to optimize it for arrays, but still it smart if we are using DOMNodeList here or some complicated object with calculated length?\nI know what the question is about arrays, but i think it is a good practice to write all your loops in one style.\n", "var arr = []; // The array\nvar i = 0;\nwhile (i < arr.length) {\n // Do something with arr[i]\n i++;\n}\n\ni++ is faster than ++i, --i and i--\nAlso, you can save the last line doing arr[i++] the last time you need to access i (but this can be hard to debug).\nYou can test it here (with other loop tests): http://jsperf.com/for-vs-whilepop/5\n", "As of September 2017 these jsperf tests are showing the following pattern to be most performant on Chrome 60:\nfunction foo(x) {\n x;\n};\narr.forEach(foo);\n\nIs anyone able to reproduce?\n", "Fastest approach is the traditional for loop. Here is a more comprehensive performance comparison. \nhttps://gists.cwidanage.com/2019/11/how-to-iterate-over-javascript-arrays.html\n", "If you want a faster for loop, define your variables outside the loop and use below syntax\n const iMax = lengthOftheLoop;\n var i = 0;\n for (; i < iMax; i++) {\n console.log(\"loop\"+i);\n }\n\nreference: https://medium.com/kbdev/voyage-to-the-most-efficient-loop-in-nodejs-and-a-bit-js-5961d4524c2e\n", "although it is a very old question, it is a very interesting one,\npardon me for tweaking the question a little bit but I am going to answer it at the end.\nthe question made me ask myself if there are any better methods for looping in js:\nso I have made some tests and here is what I found out:\n\nfor 1000_000 record: the best is forEach.\nfor 100 records: it simply doesn't matter.\n\nto go back to your question:\nthe example i created is not exactly like the question .. 
but i found out some interesting things:\nfirstly : like what you said , the arr.length will evaluate every time if it is within the comparison statement i < arr.length ...\nnote : the variable of arrLength below is not more than the a number of 1000_000 records..\n\nfor instance : this wont work\n\nbut this will\n\nand it will take .036 seconds .. which is very large compared to what it takes if the number was constant...\n\nto sum up,\nit is better to use FOREACH\nin your case: the i<arr.length should take more time ( around 1.3 the usual)\nsee the tests :\nsee the tests\n", "I have tried some other ways to iterate a huge array and found out that halving the array length and then iterating both halves in a single loop is faster. This performance difference can be seen while processing huge arrays. \nvar firstHalfLen =0;\nvar secondHalfLen = 0;\nvar count2=0;\nvar searchterm = \"face\";\nvar halfLen = arrayLength/2;\nif(arrayLength%2==halfLen)\n{\n firstHalfLen = Math.ceil(halfLen);\n secondHalfLen=Math.floor(halfLen);\n}\nelse\n{\n firstHalfLen=halfLen;\n secondHalfLen=halfLen;\n}\nfor(var firstHalfCOunter=0,secondHalfCounter = arrayLength-secondHalfLen;\n firstHalfCOunter < firstHalfLen;\n firstHalfCOunter++)\n{\n if(mainArray[firstHalfCOunter].search(new RegExp(searchterm, \"i\"))> -1)\n {\n count2+=1;\n }\n if(secondHalfCounter < arrayLength)\n {\n if(mainArray[secondHalfCounter].search(new RegExp(searchterm, \"i\"))> -1)\n {\n count2+=1;\n }\n secondHalfCounter++; \n }\n}\n\nSome performance comparison (using timer.js) between the cached length for-loop VS the above method.\nhttp://jsfiddle.net/tejzpr/bbLgzxgo/\n", "Another jsperf.com test: http://jsperf.com/while-reverse-vs-for-cached-length\nThe reverse while loop seems to be the fastest. Only problem is that while (--i) will stop at 0. How can I access array[0] in my loop then?\n", "A basic while loop is often the fastest. 
jsperf.com is a great sandbox to test these types of concepts.\nhttps://jsperf.com/fastest-array-loops-in-javascript/24\n", "Benchmarking [10000000] element array...\nThe fastest [for ++] took [76762166ns]\n┌─────────┬───────────────────────┬────────────┬──────────┐\n│ (index) │ type │ time[ns] │ baseline │\n├─────────┼───────────────────────┼────────────┼──────────┤\n│ 0 │ 'for ++' │ 76762166 │ 1 │\n│ 1 │ 'for of' │ 82407583 │ 1.07 │\n│ 2 │ '--while forward' │ 83723083 │ 1.09 │\n│ 3 │ 'do while forward --' │ 83942958 │ 1.09 │\n│ 4 │ '--do while forward' │ 84225584 │ 1.1 │\n│ 5 │ 'while forward --' │ 85156957 │ 1.11 │\n│ 6 │ '--while >= 0' │ 89745916 │ 1.17 │\n│ 7 │ '++ do while' │ 90306542 │ 1.18 │\n│ 8 │ 'for !== ++' │ 90319083 │ 1.18 │\n│ 9 │ '-- for' │ 90360167 │ 1.18 │\n│ 10 │ 'for i length --' │ 90558833 │ 1.18 │\n│ 11 │ '++ for' │ 90616125 │ 1.18 │\n│ 12 │ 'do while ++' │ 90657541 │ 1.18 │\n│ 13 │ '--for i length' │ 90757708 │ 1.18 │\n│ 14 │ 'for --' │ 90799875 │ 1.18 │\n│ 15 │ '++ while' │ 92697417 │ 1.21 │\n│ 16 │ '++ for !==' │ 94488209 │ 1.23 │\n│ 17 │ 'pop' │ 108399917 │ 1.41 │\n│ 18 │ 'while ++' │ 109276500 │ 1.42 │\n│ 19 │ 'forEach call' │ 147140124 │ 1.92 │\n│ 20 │ 'forEach' │ 148886207 │ 1.94 │\n│ 21 │ 'map' │ 207100583 │ 2.7 │\n│ 22 │ 'Array from' │ 353166207 │ 4.6 │\n│ 23 │ 'flatMap' │ 1213152082 │ 15.8 │\n│ 24 │ 'Object.keys map' │ 1294475333 │ 16.86 │\n│ 25 │ 'for in' │ 1338988749 │ 17.44 │\n└─────────┴───────────────────────┴────────────┴──────────┘\n\nTested on Macbook Air M1 2020. NodeJS 18.\nFor arrays with 10_000_000 elements, the standard for loop wins. For other cases see my gist: https://gist.github.com/episage/076ded007d0583f6a275f93a8c9c8047#file-result-txt\nShoutout to @DungGramer. I fixed bugs and enhanced his benchmark.\n" ]
[ 406, 107, 60, 55, 42, 21, 10, 10, 7, 7, 4, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0 ]
[ "The most elegant solution I know of is using map.\nvar arr = [1,2,3];\narr.map(function(input){console.log(input);});\n\n", "Try this:\nvar myarray =[],\ni = myarray.lenght;\nwhile(i--){\n// do somthing\n}\n\n", "As of 2019 WebWorker has been more popular, for large datasets, we can use WebWorker to process much much faster by fully utilize multi-core processors.\nWe also have Parallel.js which make WebWorker much easier to use for data processing.\n", "It is late 2022\nSometimes is ineffective calculate with array length at all:\nIf you have an array with indexes var myArray = [1,2,3,4,25,999999999999] it is very slow to use any solution which is going trough array with increasing index by one (including forEach).\nBest solution for me, and it is not mentioned in any top-voted answer is:\nfor(let _i in myArray ) {\n if(myArray[_i]) {\n (function(s) {\n ///\n })(myArray[_i]) \n }\n}\n\n" ]
[ -2, -2, -2, -2 ]
[ "javascript", "loops", "performance" ]
stackoverflow_0005349425_javascript_loops_performance.txt
Q: How to display a field from reference model I want to get the name of the user, using the Reserves model, because that's the one I can display. The **User ** model is referenced in the **Reserve ** model. and the only thing I can get is the Object ID. How can I get other field values from the User model through the reference? Here is the code for the user model: const mongoose = require('mongoose') const userSchema = mongoose.Schema({ name: { type: String, required: [true, 'Please add a name'] }, idnum: { type: String, required: [true, 'Please add an id number'], unique: true }, password: { type: String, required: [true, 'Please add a password'] }, role: { type: String, required: [true, 'Please select a role'], enum: ["Faculty", "Student Officer", "Admin"] }, org: { type: String, required: [true, 'Please add an organization'], }, dept: { type: String, required: [true, 'Please add a deparment'], }, }, { timestamps: true }) module.exports = mongoose.model('User', userSchema) Here is the code for the Reserve model: const mongoose = require('mongoose') const reserveSchema = mongoose.Schema({ user: { type: mongoose.Schema.Types.ObjectId, required: true, ref: 'Users' }, purpose: { type: String, required: [true, 'Please add a purpose'] }, dept: { type: String, required: [true, 'Please choose a department'] }, org: { type: String, required: [true, 'Please choose an organization'] }, bldg: { type: String, required: [true, 'Please choose a building'] }, room: { type: String, required: [true, 'Please add a room'] }, date: { type: String, required: [true, 'Please add a date'] }, time_in: { type: String, required: [true, 'Please add a starting time'] }, time_out: { type: String, required: [true, 'Please add an ending time'] }, status: { type: String, }, }, { timestamps: true, }) module.exports = mongoose.model('Reserve', reserveSchema) Then, here is the code in fetching the from the database, using the model function ReservesContent({reserves}) { return ( <div 
class='info-container'> <p id='requestor'>{reserves.user}</p> <p id='purpose'>{reserves.purpose}</p> <p id='building'>{reserves.bldg}</p> <p id='room'>{reserves.room}</p> <p id='time_in'>{reserves.time_in}</p> <p id='time_out'>{reserves.time_out}</p> </div> ); } export default ReservesContent {reserves.length > 0 ? ( <div> {reserves.map((reserve) => ( <ReservesContent key={reserve._id} reserves={reserve} /> ))} </div> ) : (<h3>No Reservations Found</h3>)} A: For that to work you need to populate the user reference Here's some psudo code that demonstrates how to populate a field: Reserve.find({ /* Insert serach criteria or nothing at all */ }).populate('user'); I hope this helps! Docs: https://mongoosejs.com/docs/populate.html A: I did some research and I found a way using Schema Virtuals I added the requestor on my reserve.model file const reserveSchema = mongoose.Schema({ user: { type: mongoose.Schema.Types.ObjectId, required: true, ref: 'User' }, requestor: { type: String, required: true }, then I added a Schema.virtual reserveSchema.virtual('requestor-name', { ref: 'User', localField: 'requestor', foreignField: 'name', },) i also added that requestor field in my reserve.controller : requestor:req.user.name
How to display a field from reference model
I want to get the name of the user, using the Reserves model, because that's the one I can display. The **User ** model is referenced in the **Reserve ** model. and the only thing I can get is the Object ID. How can I get other field values from the User model through the reference? Here is the code for the user model: const mongoose = require('mongoose') const userSchema = mongoose.Schema({ name: { type: String, required: [true, 'Please add a name'] }, idnum: { type: String, required: [true, 'Please add an id number'], unique: true }, password: { type: String, required: [true, 'Please add a password'] }, role: { type: String, required: [true, 'Please select a role'], enum: ["Faculty", "Student Officer", "Admin"] }, org: { type: String, required: [true, 'Please add an organization'], }, dept: { type: String, required: [true, 'Please add a deparment'], }, }, { timestamps: true }) module.exports = mongoose.model('User', userSchema) Here is the code for the Reserve model: const mongoose = require('mongoose') const reserveSchema = mongoose.Schema({ user: { type: mongoose.Schema.Types.ObjectId, required: true, ref: 'Users' }, purpose: { type: String, required: [true, 'Please add a purpose'] }, dept: { type: String, required: [true, 'Please choose a department'] }, org: { type: String, required: [true, 'Please choose an organization'] }, bldg: { type: String, required: [true, 'Please choose a building'] }, room: { type: String, required: [true, 'Please add a room'] }, date: { type: String, required: [true, 'Please add a date'] }, time_in: { type: String, required: [true, 'Please add a starting time'] }, time_out: { type: String, required: [true, 'Please add an ending time'] }, status: { type: String, }, }, { timestamps: true, }) module.exports = mongoose.model('Reserve', reserveSchema) Then, here is the code in fetching the from the database, using the model function ReservesContent({reserves}) { return ( <div class='info-container'> <p id='requestor'>{reserves.user}</p> 
<p id='purpose'>{reserves.purpose}</p> <p id='building'>{reserves.bldg}</p> <p id='room'>{reserves.room}</p> <p id='time_in'>{reserves.time_in}</p> <p id='time_out'>{reserves.time_out}</p> </div> ); } export default ReservesContent {reserves.length > 0 ? ( <div> {reserves.map((reserve) => ( <ReservesContent key={reserve._id} reserves={reserve} /> ))} </div> ) : (<h3>No Reservations Found</h3>)}
[ "For that to work you need to populate the user reference\nHere's some psudo code that demonstrates how to populate a field:\nReserve.find({ /* Insert serach criteria or nothing at all */ }).populate('user');\n\nI hope this helps!\nDocs: https://mongoosejs.com/docs/populate.html\n", "I did some research and I found a way using Schema Virtuals\nI added the requestor on my reserve.model file\nconst reserveSchema = mongoose.Schema({\nuser: {\n type: mongoose.Schema.Types.ObjectId,\n required: true,\n ref: 'User'\n},\nrequestor: {\n type: String,\n required: true\n},\n\nthen I added a Schema.virtual\nreserveSchema.virtual('requestor-name', {\nref: 'User',\nlocalField: 'requestor',\nforeignField: 'name',\n},)\n\ni also added that requestor field in my reserve.controller : requestor:req.user.name\n" ]
[ 0, 0 ]
[]
[]
[ "mongoose", "mongoose_schema", "reactjs", "reference" ]
stackoverflow_0074619352_mongoose_mongoose_schema_reactjs_reference.txt
Q: Jest encountered an unexpected token Not sure why it's complaining on this line: const wrapper = shallow(<BitcoinWidget {...props} />); /Users/leongaban/projects/match/bitcoin/src/components/bitcoinWidget.test.js: Unexpected token (17:26) Jest encountered an unexpected token This usually means that you are trying to import a file which Jest cannot parse, e.g. it's not plain JavaScript. By default, if Jest sees a Babel config, it will use that to transform your files, ignoring "node_modules". Here's what you can do: - To have some of your "node_modules" files transformed, you can specify a custom "transformIgnorePatterns" in your config. - If you need a custom transformation specify a "transform" option in your config. - If you simply want to mock your non-JS modules (e.g. binary assets) you can stub them out with the "moduleNameMapper" config option. You'll find more details and examples of these config options in the docs: https://jestjs.io/docs/en/configuration.html Details: 15 | 16 | describe('when rendering', () => { >17 | const wrapper = shallow(<BitcoinWidget {...props} />); 18 | ^ 19 | it('should render a component matching the snapshot', () => { 20 | const tree = toJson(wrapper); Entire test: import React from 'react'; import { shallow } from 'enzyme'; import toJson from 'enzyme-to-json'; // Local components import BitcoinWidget from './bitcoinWidget'; const props = { logo: 'foobar', coin: { price: 0 }, refresh: jest.fn() } describe('when rendering', () => { const wrapper = shallow(<BitcoinWidget {...props} />); it('should render a component matching the snapshot', () => { const tree = toJson(wrapper); expect(tree).toMatchSnapshot(); expect(wrapper).toHaveLength(1); }); }); The component import React from 'react'; const BitcoinWidget = ({ logo, coin : { price }, refresh }) => { return ( <div className="bitcoin-wrapper shadow"> <header> <img src={logo} alt="Bitcoin Logo"/> </header> <div className="price"> Coinbase ${price} </div> <button className="btn 
striped-shadow white" onClick={refresh}> <span>Refresh</span> </button> </div> ); } export default BitcoinWidget; And my package.json { "name": "bitcoin", "version": "0.1.0", "private": true, "dependencies": { "axios": "^0.18.0", "react": "^16.4.2", "react-dom": "^16.4.2", "react-redux": "^5.0.7", "react-scripts": "1.1.5", "redux": "^4.0.0", "redux-thunk": "^2.3.0" }, "scripts": { "start": "react-scripts start", "build": "react-scripts build", "eject": "react-scripts eject", "test": "yarn run test-jest:update --verbose --maxWorkers=2", "test-jest:update": "jest src --updateSnapshot", "test-jest": "jest src" }, "now": { "name": "bitcoin", "engines": { "node": "8.11.3" }, "alias": "leongaban.com" }, "jest": { "verbose": true, "moduleNameMapper": { "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/client/assetsTransformer.js" }, "moduleFileExtensions": [ "js", "jsx" ], "moduleDirectories": [ "node_modules" ] }, "devDependencies": { "enzyme": "^3.4.4", "enzyme-to-json": "^3.3.4", "jest": "^23.5.0" } } A: Add this in your package.json jest config. "transform": { "\\.js$": "<rootDir>/node_modules/babel-jest" }, Let me know if the issue still persists. A: For anyone using create-react-app, only certain jest configurations can be changed in package.json when using create-react-app. I have issues with Jest picking up an internal library, Jest would display 'unexpected token' errors wherever I had my imports from this library. To solve this, you can change your test script to the below: "test": "react-scripts test --transformIgnorePatterns 'node_modules/(?!(<your-package-goes-here>)/)'", A: for anyone who struggled with this issue and non of the above answers worked for him/her. 
after about a long time of searching, I reached for this solution edit your jest.config.js to add transformIgnorePatterns //jest.config.js module.exports = { preset: 'ts-jest', testEnvironment: 'jsdom', testMatch: ["**/__tests__/**/*.ts?(x)", "**/?(*.)+(test).ts?(x)"], transform: { "^.+\\.(js|ts)$": "ts-jest", }, transformIgnorePatterns: [ "/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\.js$", "/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\.ts$", "/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\.tsx$", ], } put the packages that you want to ignore inside [] and separate them by | in my case [@autofiy/autofiyable|@autofiy/property] A: I also encountered the same error while setting up Jest in my React app created using Webpack. I had to add @babel/preset-env and it was fixed. I have also written a blog article about the same. npm i -D @babel/preset-env And then add this in "presets" in .babelrc file. E.g. { "presets": ["@babel/react", "@babel/env"] } https://medium.com/@shubhgupta147/how-i-solved-issues-while-setting-up-jest-and-enzyme-in-a-react-app-created-using-webpack-7e321647f080?sk=f3af93732228d60ccb24b47ef48d7062 A: I added the jest update to my package.json "jest": { "transformIgnorePatterns": [ "node_modules/(?!(<package-name>|<second-package-name>)/)" ] }, Feel free to remove the |<second-package-name> if not required. You can also do it as part of your script as mentioned @paulosullivan22 "test": "react-scripts test --transformIgnorePatterns 'node_modules/(?!(<package-name>)/)'" A: In my case, the issue was that I was importing the original module in the mocked module: import urlExist from "url-exist"; async function stubbedUrlExist(): Promise<boolean> { // do something } export default stubbedUrlExist; The solution was to not import url-exist in the url-exist mock. This might have lead to a circular import. Jest was perhaps catching this error in a generic try<>catch block dealing with the loading of modules. 
A: Below works for me module.exports = { presets: [ ["@babel/preset-env", { targets: { node: "current" } }], "@babel/preset-typescript", "@babel/react" ] }; A: could not get it working with transforms, I ended up mocking the dependency: Create a file: <path>/react-markdown.js import React from 'react'; function ReactMarkdown({ children }){ return <>{children}</>; } export default ReactMarkdown; On jest.config.js file add: module.exports = { moduleNameMapper: { 'react-markdown': '<path>/mocks/react-markdown.js', }, }; credits to juanmartinez on https://github.com/remarkjs/react-markdown/issues/635 A: Below works for me. Create babel.config.js file. module.exports = { presets: [ [ '@babel/preset-env', { targets: { esmodules: true } } ], [ '@babel/preset-react', { runtime: 'automatic' } ], ], };
Jest encountered an unexpected token
Not sure why it's complaining on this line: const wrapper = shallow(<BitcoinWidget {...props} />); /Users/leongaban/projects/match/bitcoin/src/components/bitcoinWidget.test.js: Unexpected token (17:26) Jest encountered an unexpected token This usually means that you are trying to import a file which Jest cannot parse, e.g. it's not plain JavaScript. By default, if Jest sees a Babel config, it will use that to transform your files, ignoring "node_modules". Here's what you can do: - To have some of your "node_modules" files transformed, you can specify a custom "transformIgnorePatterns" in your config. - If you need a custom transformation specify a "transform" option in your config. - If you simply want to mock your non-JS modules (e.g. binary assets) you can stub them out with the "moduleNameMapper" config option. You'll find more details and examples of these config options in the docs: https://jestjs.io/docs/en/configuration.html Details: 15 | 16 | describe('when rendering', () => { >17 | const wrapper = shallow(<BitcoinWidget {...props} />); 18 | ^ 19 | it('should render a component matching the snapshot', () => { 20 | const tree = toJson(wrapper); Entire test: import React from 'react'; import { shallow } from 'enzyme'; import toJson from 'enzyme-to-json'; // Local components import BitcoinWidget from './bitcoinWidget'; const props = { logo: 'foobar', coin: { price: 0 }, refresh: jest.fn() } describe('when rendering', () => { const wrapper = shallow(<BitcoinWidget {...props} />); it('should render a component matching the snapshot', () => { const tree = toJson(wrapper); expect(tree).toMatchSnapshot(); expect(wrapper).toHaveLength(1); }); }); The component import React from 'react'; const BitcoinWidget = ({ logo, coin : { price }, refresh }) => { return ( <div className="bitcoin-wrapper shadow"> <header> <img src={logo} alt="Bitcoin Logo"/> </header> <div className="price"> Coinbase ${price} </div> <button className="btn striped-shadow white" onClick={refresh}> 
<span>Refresh</span> </button> </div> ); } export default BitcoinWidget; And my package.json { "name": "bitcoin", "version": "0.1.0", "private": true, "dependencies": { "axios": "^0.18.0", "react": "^16.4.2", "react-dom": "^16.4.2", "react-redux": "^5.0.7", "react-scripts": "1.1.5", "redux": "^4.0.0", "redux-thunk": "^2.3.0" }, "scripts": { "start": "react-scripts start", "build": "react-scripts build", "eject": "react-scripts eject", "test": "yarn run test-jest:update --verbose --maxWorkers=2", "test-jest:update": "jest src --updateSnapshot", "test-jest": "jest src" }, "now": { "name": "bitcoin", "engines": { "node": "8.11.3" }, "alias": "leongaban.com" }, "jest": { "verbose": true, "moduleNameMapper": { "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "<rootDir>/client/assetsTransformer.js" }, "moduleFileExtensions": [ "js", "jsx" ], "moduleDirectories": [ "node_modules" ] }, "devDependencies": { "enzyme": "^3.4.4", "enzyme-to-json": "^3.3.4", "jest": "^23.5.0" } }
[ "Add this in your package.json jest config.\n\"transform\": {\n \"\\\\.js$\": \"<rootDir>/node_modules/babel-jest\"\n },\n\nLet me know if the issue still persists.\n", "For anyone using create-react-app, only certain jest configurations can be changed in package.json when using create-react-app.\nI have issues with Jest picking up an internal library, Jest would display 'unexpected token' errors wherever I had my imports from this library. \nTo solve this, you can change your test script to the below: \n\"test\": \"react-scripts test --transformIgnorePatterns 'node_modules/(?!(<your-package-goes-here>)/)'\",\n", "for anyone who struggled with this issue and non of the above answers worked for him/her.\nafter about a long time of searching, I reached for this solution\nedit your jest.config.js to add transformIgnorePatterns\n//jest.config.js\n\nmodule.exports = {\n preset: 'ts-jest',\n testEnvironment: 'jsdom',\n testMatch: [\"**/__tests__/**/*.ts?(x)\", \"**/?(*.)+(test).ts?(x)\"],\n transform: {\n \"^.+\\\\.(js|ts)$\": \"ts-jest\",\n },\n transformIgnorePatterns: [\n \"/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\\\.js$\",\n \"/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\\\.ts$\",\n \"/node_modules/(?![@autofiy/autofiyable|@autofiy/property]).+\\\\.tsx$\",\n ],\n}\n\nput the packages that you want to ignore inside [] and separate them by |\nin my case [@autofiy/autofiyable|@autofiy/property]\n", "I also encountered the same error while setting up Jest in my React app created using Webpack. I had to add @babel/preset-env and it was fixed. I have also written a blog article about the same.\nnpm i -D @babel/preset-env\nAnd then add this in \"presets\" in .babelrc file. 
E.g.\n{ \n \"presets\": [\"@babel/react\", \"@babel/env\"]\n}\n\nhttps://medium.com/@shubhgupta147/how-i-solved-issues-while-setting-up-jest-and-enzyme-in-a-react-app-created-using-webpack-7e321647f080?sk=f3af93732228d60ccb24b47ef48d7062\n", "I added the jest update to my package.json\n\"jest\": {\n \"transformIgnorePatterns\": [\n \"node_modules/(?!(<package-name>|<second-package-name>)/)\"\n ]\n},\n\nFeel free to remove the |<second-package-name> if not required.\nYou can also do it as part of your script as mentioned @paulosullivan22\n\"test\": \"react-scripts test --transformIgnorePatterns 'node_modules/(?!(<package-name>)/)'\"\n", "In my case, the issue was that I was importing the original module in the mocked module:\nimport urlExist from \"url-exist\";\n\nasync function stubbedUrlExist(): Promise<boolean> {\n // do something\n}\n\nexport default stubbedUrlExist;\n\nThe solution was to not import url-exist in the url-exist mock. This might have lead to a circular import. Jest was perhaps catching this error in a generic try<>catch block dealing with the loading of modules.\n", "Below works for me\nmodule.exports = {\n presets: [\n [\"@babel/preset-env\", { targets: { node: \"current\" } }],\n \"@babel/preset-typescript\", \"@babel/react\"\n ]\n};\n\n", "could not get it working with transforms, I ended up mocking the dependency:\nCreate a file: <path>/react-markdown.js\nimport React from 'react';\n\nfunction ReactMarkdown({ children }){\n return <>{children}</>;\n}\n\nexport default ReactMarkdown;\n\nOn jest.config.js file add:\nmodule.exports = {\n moduleNameMapper: {\n 'react-markdown': '<path>/mocks/react-markdown.js',\n },\n};\n\ncredits to juanmartinez on https://github.com/remarkjs/react-markdown/issues/635\n", "Below works for me.\nCreate babel.config.js file.\nmodule.exports = {\npresets: [\n [ '@babel/preset-env', { targets: { esmodules: true } } ],\n [ '@babel/preset-react', { runtime: 'automatic' } ],\n],\n\n};\n" ]
[ 13, 8, 7, 4, 4, 1, 0, 0, 0 ]
[]
[]
[ "javascript", "jestjs", "reactjs" ]
stackoverflow_0051994111_javascript_jestjs_reactjs.txt
Q: WPF XAML, Re-Use a color / solidbrush in ResourceDictionary I have some colors in my WPF C# app, which I want to re-use. To avoid double typing (and maintaining issues). <ResourceDictionary xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"> <SolidColorBrush x:Key="mybackground">#ffffff</SolidColorBrush> <SolidColorBrush x:Key="myforeground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderBackground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground">#ffffff</SolidColorBrush> </ResourceDictionary> how can I re-use the mybackground or myforeground in other brushes? <ResourceDictionary xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"> <SolidColorBrush x:Key="mybackground">#ffffff</SolidColorBrush> <SolidColorBrush x:Key="myforeground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderBackground">{StaticResource myforeground}/SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground">{StaticResource mybackground}/SolidColorBrush> </ResourceDictionary> HINT: The header use the fore- and background color toggeld to the main content. I tried also sth. like <SolidColorBrush x:Key="HeaderBackground" Color="{StaticResource myforeground}"></SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground" Color="{StaticResource mybackground}"></SolidColorBrush> put here I get the obvious warning incompatible type. A: if you want to reuse colors, define Color as resource: <Color x:Key="red">#FF0000</Color> <SolidColorBrush x:Key="ErrorBrush" Color="{StaticResource red}"/>
WPF XAML, Re-Use a color / solidbrush in ResourceDictionary
I have some colors in my WPF C# app, which I want to re-use. To avoid double typing (and maintaining issues). <ResourceDictionary xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"> <SolidColorBrush x:Key="mybackground">#ffffff</SolidColorBrush> <SolidColorBrush x:Key="myforeground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderBackground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground">#ffffff</SolidColorBrush> </ResourceDictionary> how can I re-use the mybackground or myforeground in other brushes? <ResourceDictionary xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"> <SolidColorBrush x:Key="mybackground">#ffffff</SolidColorBrush> <SolidColorBrush x:Key="myforeground">#000000</SolidColorBrush> <SolidColorBrush x:Key="HeaderBackground">{StaticResource myforeground}/SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground">{StaticResource mybackground}/SolidColorBrush> </ResourceDictionary> HINT: The header use the fore- and background color toggeld to the main content. I tried also sth. like <SolidColorBrush x:Key="HeaderBackground" Color="{StaticResource myforeground}"></SolidColorBrush> <SolidColorBrush x:Key="HeaderForeground" Color="{StaticResource mybackground}"></SolidColorBrush> put here I get the obvious warning incompatible type.
[ "if you want to reuse colors, define Color as resource:\n<Color x:Key=\"red\">#FF0000</Color>\n<SolidColorBrush x:Key=\"ErrorBrush\" Color=\"{StaticResource red}\"/>\n\n" ]
[ 1 ]
[]
[]
[ "wpf", "xaml" ]
stackoverflow_0074674713_wpf_xaml.txt
Q: LaunchDaemon service on MacOS not running until user signs in I have an app service I want to start at system startup with a plist file: <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>Label</key> <string>com.my.app.ident</string> <key>ProgramArguments</key> <array> <string>/Users/me/Desktop/MyApp/App</string> </array> <key>RunAtLoad</key> <true/> <key>KeepAlive</key> <true/> <key>StandardOutPath</key> <string>/Users/me/Desktop/MyApp/logfile.log</string> <key>StandardErrorPath</key> <string>/Users/me/Desktop/MyApp/logerr.log</string> <key>UserName</key> <string>me</string> </dict> </plist> This has been created with sudo chown root:wheel /Library/LaunchDaemons/com.my.app.ident.plist and permissions sudo chmod a+x /Library/LaunchDaemons/com.my.app.ident.plist and then loaded: sudo launchctl load -w /Library/LaunchDaemons/com.my.app.ident.plist The application has permissions like: -rwxr-xr-x 1 me staff 54755728 29 Nov 12:46 App Which runs fine but is not starting up with the system - it just logs errors to logerr.log repeating: Couldn't memory map the bundle file for reading. A fatal error occured while processing application bundle Failed to map file. open(/Users/me/Desktop/MyApp/App) failed with error 1 Failure processing application bundle. These errors stop when the user "me" signs in and then the service starts working. I need it to work without "me" signing in. any ideas? A: From discussion in the comments, we have narrowed it down to a path issue. The exact mechanism preventing access isn't 100% clear to me here, but pre-login is a fairly locked down environment, so I'm not terribly surprised it doesn't work running from your user's desktop folder. 
I suspect that in your particular case it's down to the consent system: the logged-in user needs to grant each process access to certain directories such as Desktop, Documents, Downloads, etc., and as there is no user logged in, there will be no consent on record. If your executable was inside an .app bundle, app translocation would be an additional concern. The solution is to install launch daemon or global launch agent binaries system-wide. If they're part of an .app bundle, install the app in /Applications (this will also avoid app translocation issues). Otherwise, a "good" location is /Library/Application Support/[Your-Application]/. Incidentally, if you're installing your daemon from a GUI app, on macOS 12 'Monterey' or older, SMJobBless is an even better solution than manually picking a location and dropping a plist into /Library/LaunchDaemons. For macOS 13 'Ventura' and newer, take a look at the section about daemons and agents in the WWDC22 'What's new in Privacy' session and the new SMAppService APIs available there.
LaunchDaemon service on MacOS not running until user signs in
I have an app service I want to start at system startup with a plist file: <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>Label</key> <string>com.my.app.ident</string> <key>ProgramArguments</key> <array> <string>/Users/me/Desktop/MyApp/App</string> </array> <key>RunAtLoad</key> <true/> <key>KeepAlive</key> <true/> <key>StandardOutPath</key> <string>/Users/me/Desktop/MyApp/logfile.log</string> <key>StandardErrorPath</key> <string>/Users/me/Desktop/MyApp/logerr.log</string> <key>UserName</key> <string>me</string> </dict> </plist> This has been created with sudo chown root:wheel /Library/LaunchDaemons/com.my.app.ident.plist and permissions sudo chmod a+x /Library/LaunchDaemons/com.my.app.ident.plist and then loaded: sudo launchctl load -w /Library/LaunchDaemons/com.my.app.ident.plist The application has permissions like: -rwxr-xr-x 1 me staff 54755728 29 Nov 12:46 App Which runs fine but is not starting up with the system - it just logs errors to logerr.log repeating: Couldn't memory map the bundle file for reading. A fatal error occured while processing application bundle Failed to map file. open(/Users/me/Desktop/MyApp/App) failed with error 1 Failure processing application bundle. These errors stop when the user "me" signs in and then the service starts working. I need it to work without "me" signing in. any ideas?
[ "From discussion in the comments, we have narrowed it down to a path issue. The exact mechanism preventing access isn't 100% clear to me here, but pre-login is a fairly locked down environment, so I'm not terribly surprised it doesn't work running from your user's desktop folder.\nI suspect that in your particular case it's down to the consent system: the logged-in user needs to grant each process access to certain directories such as Desktop, Documents, Downloads, etc., and as there is no user logged in, there will be no consent on record.\nIf your executable was inside an .app bundle, app translocation would be an additional concern.\nThe solution is to install launch daemon or global launch agent binaries system-wide. If they're part of an .app bundle, install the app in /Applications (this will also avoid app translocation issues). Otherwise, a \"good\" location is /Library/Application Support/[Your-Application]/.\nIncidentally, if you're installing your daemon from a GUI app, on macOS 12 'Monterey' or older, SMJobBless is an even better solution than manually picking a location and dropping a plist into /Library/LaunchDaemons.\nFor macOS 13 'Ventura' and newer, take a look at the section about daemons and agents in the WWDC22 'What's new in Privacy' session and the new SMAppService APIs available there.\n" ]
[ 0 ]
[]
[]
[ "launch_daemon", "launchd", "macos", "macos_monterey", "plist" ]
stackoverflow_0074664759_launch_daemon_launchd_macos_macos_monterey_plist.txt
Q: Twilio: Using a TwiML app from another twilio account I have two twilio accounts. On the first I created a TwiML app configured to use webhooks. On the second one I have a number configured with the same webhooks as the previous app. All the ivr steps work fine, but when I have to answer via browser to a call, I receive this error: {code: 31002, connection: Connection, message: 'Connection Declined' …. My goal is to have a unique app to handle different customer twilio accounts. Is this possible or I have to create a Twiml app in every customer account? thanks A: I'm not really sure I fully understands your scenario. Can you please elaborate on what you are trying to achieve? When you write "... but when I have to answer via browser to a call, I receive this error..." - what exactly are you doing? are you trying to answer an inbound call using Flex? are you trying to answer the call using a WebRTC API? There are elements in your reports which are missing. Please fill the missing gaps.
Twilio: Using a TwiML app from another twilio account
I have two twilio accounts. On the first I created a TwiML app configured to use webhooks. On the second one I have a number configured with the same webhooks as the previous app. All the ivr steps work fine, but when I have to answer via browser to a call, I receive this error: {code: 31002, connection: Connection, message: 'Connection Declined' …. My goal is to have a unique app to handle different customer twilio accounts. Is this possible or I have to create a Twiml app in every customer account? thanks
[ "I'm not really sure I fully understands your scenario. Can you please elaborate on what you are trying to achieve? When you write \"... but when I have to answer via browser to a call, I receive this error...\" - what exactly are you doing? are you trying to answer an inbound call using Flex? are you trying to answer the call using a WebRTC API?\nThere are elements in your reports which are missing. Please fill the missing gaps.\n" ]
[ 0 ]
[]
[]
[ "twilio", "twilio_twiml" ]
stackoverflow_0074613792_twilio_twilio_twiml.txt
Q: how to make Intellij extends exception by default not throwable java When I define a new exeption on IntelliJ IDEA Ultimate and use ctlr + enter to create the exception class, it extends Throwable by default, I would like to change that to exception but I can't find where it is in the settings. I've tried to to the settings in Editor -> File and Code Templates but I can't find what I'm searching for. A: To change the default behavior for creating new exception classes in IntelliJ IDEA follow these steps: In the main menu, go to File > Settings (or Preferences on macOS). In the Settings/Preferences dialog, navigate to the Editor > File and Code Templates section. In the right pane, expand the Code tab and select the Exception entry. In the editor window, modify the template code as needed to change the default behavior. For example, to make the created exception class extend the Exception class instead of Throwable modify the code as follows: #set( $exception = "Exception" ) public class $NAME extends $exception { public $NAME() { } public $NAME(String message) { super(message); } public $NAME(String message, Throwable cause) { super(message, cause); } public $NAME(Throwable cause) { super(cause); } protected $NAME(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { super(message, cause, enableSuppression, writableStackTrace); } } After making the desired changes, click the Apply button to save changes. The next time you use the keyboard shortcut (Ctrl + Enter) to create a new exception class, it will use the updated template code and extend the Exception class by default.
how to make Intellij extends exception by default not throwable java
When I define a new exeption on IntelliJ IDEA Ultimate and use ctlr + enter to create the exception class, it extends Throwable by default, I would like to change that to exception but I can't find where it is in the settings. I've tried to to the settings in Editor -> File and Code Templates but I can't find what I'm searching for.
[ "To change the default behavior for creating new exception classes in IntelliJ IDEA follow these steps:\n\nIn the main menu, go to File > Settings (or Preferences on macOS).\nIn the Settings/Preferences dialog, navigate to the Editor > File and Code Templates section.\nIn the right pane, expand the Code tab and select the Exception entry.\nIn the editor window, modify the template code as needed to change the default behavior.\n\nFor example, to make the created exception class extend the Exception class instead of Throwable modify the code as follows:\n#set( $exception = \"Exception\" )\npublic class $NAME extends $exception {\n public $NAME() {\n }\n\n public $NAME(String message) {\n super(message);\n }\n\n public $NAME(String message, Throwable cause) {\n super(message, cause);\n }\n\n public $NAME(Throwable cause) {\n super(cause);\n }\n\n protected $NAME(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {\n super(message, cause, enableSuppression, writableStackTrace);\n }\n}\n\nAfter making the desired changes, click the Apply button to save changes. The next time you use the keyboard shortcut (Ctrl + Enter) to create a new exception class, it will use the updated template code and extend the Exception class by default.\n" ]
[ 0 ]
[]
[]
[ "default", "exception", "extends", "intellij_idea", "java" ]
stackoverflow_0074669424_default_exception_extends_intellij_idea_java.txt
Q: How to make responsive website using Html and Css? I have made travelling using html and css as mini project. But now i want to make it responsive for all devices. I try it make it responsive but i am not able to make it fully responsive, i am not able to fix images and footer of my site.
How to make responsive website using Html and Css?
I have made travelling using html and css as mini project. But now i want to make it responsive for all devices. I try it make it responsive but i am not able to make it fully responsive, i am not able to fix images and footer of my site.
[]
[]
[ "Use media queries and CSS flexbox.\nMedia queries allow you to specify different styles for different screen sizes. mobile devices,larger screen, etc.\nCSS flexbox is to create flexible layouts that auto adjust to the size of the screen.\n<style>\n/* Mobile styles */\n@media only screen and (max-width: 600px) {\n /* Make the website full-width */\n body {\n width: 100%;\n }\n\n /* Use flexbox to arrange the website elements */\n .container {\n display: flex;\n flex-direction: column;\n }\n\n /* Make the header and footer full-width */\n header, footer {\n width: 100%;\n }\n\n /* Make the images full-width and resize them to fit the screen */\n img {\n width: 100%;\n height: auto;\n }\n}\n</style>\n\n<body>\n <div class=\"container\">\n <header>\n ...\n </header>\n\n <main>\n ...\n </main>\n\n <footer>\n ...\n </footer>\n </div>\n</body>\n\n" ]
[ -1 ]
[ "css", "html", "javascript" ]
stackoverflow_0074674742_css_html_javascript.txt
Q: React Hook Form move items between useFieldArray lists I'm using React Hook Form to build a basic page builder application and it's been brilliant so far, I've been using the useFieldArray hook to create lists that contain items, however, I haven't found a way to move items between lists. I know I can use the move() function to reorder items within the same list, however, since each list has its own nested useFieldArray I can't move the item from one list component to another list component. If anyone knows of a way around this it would be much appreciated! Here is a very simplified example of my current setup: export const App = () => { const methods = useForm({ defaultValues: { lists: [ { list_id: 1, items: [ { item_id: 1, name: 'Apple' }, { item_id: 2, name: 'Orange' } ] }, { list_id: 2, items: [ { item_id: 3, name: 'Banana' }, { item_id: 4, name: 'Lemon' } ] } ] } }); return ( <FormProvider {...methods}> <Page/> </FormProvider> ) } export const Page = () => { const { control } = useFormContext(); const { fields } = useFieldArray({ control, name: 'lists' }) return ( <ul> {fields?.map((field, index) => ( <List listIdx={index} /> ))} </ul> ) } export const List = ({ listIdx }) => { const { control, watch } = useFormContext(); const { fields, move } = useFieldArray({ control, name: `lists[${sectionIdx}].items` }) const handleMove = (prevIdx, nextIdx) => { // this allows me to move within lists but not between them move(prevIdx, nextIdx); } return ( <li> <p>ID: {watch(lists[${listIdx}].list_id)}</p> <ul> {fields?.map((field, index) => ( <Item listIdx={index} itemIdx={index} handleMove={handleMove}/> ))} </ul> </li> ) } export const Item = ({ listIdx, itemIdx, handleMove }) => { const { control, register } = useFormContext(); return ( <li> <p>ID: {watch(lists[${listIdx}].items[${itemIdx}].item_id)}</p> <label Name: <input { ...register('lists[${listIdx}].items[${itemIdx}]) }/> /> <button onClick={() => handleMove(itemIdx, itemIdx - 1)}>Up</button> <button 
onClick={() => handleMove(itemIdx, itemIdx + 1)}>Down</button> </div> ) } Thanks in advance! A: If you'd not like to alter your default values (your data structure), I think the best way to handle this is using update method returning from useFieldArray. You have the data of both inputs that are going to be moved around, knowing their list index and item index, you could easily update their current positions with each other's data.
React Hook Form move items between useFieldArray lists
I'm using React Hook Form to build a basic page builder application and it's been brilliant so far, I've been using the useFieldArray hook to create lists that contain items, however, I haven't found a way to move items between lists. I know I can use the move() function to reorder items within the same list, however, since each list has its own nested useFieldArray I can't move the item from one list component to another list component. If anyone knows of a way around this it would be much appreciated! Here is a very simplified example of my current setup: export const App = () => { const methods = useForm({ defaultValues: { lists: [ { list_id: 1, items: [ { item_id: 1, name: 'Apple' }, { item_id: 2, name: 'Orange' } ] }, { list_id: 2, items: [ { item_id: 3, name: 'Banana' }, { item_id: 4, name: 'Lemon' } ] } ] } }); return ( <FormProvider {...methods}> <Page/> </FormProvider> ) } export const Page = () => { const { control } = useFormContext(); const { fields } = useFieldArray({ control, name: 'lists' }) return ( <ul> {fields?.map((field, index) => ( <List listIdx={index} /> ))} </ul> ) } export const List = ({ listIdx }) => { const { control, watch } = useFormContext(); const { fields, move } = useFieldArray({ control, name: `lists[${sectionIdx}].items` }) const handleMove = (prevIdx, nextIdx) => { // this allows me to move within lists but not between them move(prevIdx, nextIdx); } return ( <li> <p>ID: {watch(lists[${listIdx}].list_id)}</p> <ul> {fields?.map((field, index) => ( <Item listIdx={index} itemIdx={index} handleMove={handleMove}/> ))} </ul> </li> ) } export const Item = ({ listIdx, itemIdx, handleMove }) => { const { control, register } = useFormContext(); return ( <li> <p>ID: {watch(lists[${listIdx}].items[${itemIdx}].item_id)}</p> <label Name: <input { ...register('lists[${listIdx}].items[${itemIdx}]) }/> /> <button onClick={() => handleMove(itemIdx, itemIdx - 1)}>Up</button> <button onClick={() => handleMove(itemIdx, itemIdx + 1)}>Down</button> 
</div> ) } Thanks in advance!
[ "If you'd not like to alter your default values (your data structure), I think the best way to handle this is using update method returning from useFieldArray. You have the data of both inputs that are going to be moved around, knowing their list index and item index, you could easily update their current positions with each other's data.\n" ]
[ 0 ]
[]
[]
[ "javascript", "react_hook_form", "reactjs" ]
stackoverflow_0074671634_javascript_react_hook_form_reactjs.txt
Q: How do I separate api / async request logic from react components when using recoil So at the moment I am having to put my request / api logic directly into my components because what I need to do a lot of the time is set state based on the response I get from the back end. Below is a function that I have on my settings page that I use to save the settings to recoil after the user hits save on the form: const setUserConfig = useSetRecoilState(userAtoms.userConfig); const submitSettings = async (values: UserConfigInterface) => { try { const { data: {data} } = await updateUser(values); setUserConfig({ ...data }); } catch (error) { console.log('settings form error: ', error); } } This works perfectly...I just dont want the function in my component as most of my components are getting way bigger than they need to be. I have tried making a separate file to do this but I can only use the recoil hooks (in this instance useSetRecoilState) inside of components and it just complains when I try and do this outside of a react component. I have tried implementing this with recoils selector and selectorFamily functions but it gets kind of complicated. 
Here is how I have tried it inside of a file that has atoms / selectors only: export const languageProgress = atom<LanguageProgress>({ key: "LanguageProgress", default: { level: 1, xp: 0, max_xp: 0 } }) export const languageProgressUpdate = selectorFamily<LanguageProgress>({ key: "LanguageProgress", get: () => async () => { try { const { data: { data } } = await getLanguageProgress(); return data; } catch (error) { console.log('get language progress error'); } }, set: (params:object) => async ({set}) => { try { const { data: { data } } = await updateLanguageProgress(params); set(languageProgress, { level: data.level, xp: data.xp, max_xp: data.max_xp }); } catch (error) { console.log('language progress update error: ', error); } } }); What I want to do here is get the values I need from the back end and display it in the front which I can do in the selector function get but now I have 2 points of truth for this...my languageProgress atom will initially be incorrect as its not getting anything from the database so I have to use useGetRevoilValue on the languageProgressUpdate selector I have made but then when I want to update I am updating the atom and not the actual value. I cannot find a good example anywhere that does what I am trying to here (very suprisingly as I would have thought it is quite a common way to do things...get data from back end and set it in state.) and I can't figure out a way to do it without doing it in the component (as in the first example). Ideally I would like something like the first example but outside of a component because that solution is super simple and works for me. A: So I dont know if this is the best answer but it does work and ultimately what I wanted to do was seperate the logic from the screen component. 
The answer in my situation is a bit long winded but this is what I used to solve the problem: https://medium.com/geekculture/crud-with-recoiljs-and-remote-api-e36581b77168 Essentially the answer is to put all the logic into a hook and get state from the api and set it there.
How do I separate API / async request logic from React components when using Recoil
So at the moment I am having to put my request / api logic directly into my components because what I need to do a lot of the time is set state based on the response I get from the back end. Below is a function that I have on my settings page that I use to save the settings to recoil after the user hits save on the form: const setUserConfig = useSetRecoilState(userAtoms.userConfig); const submitSettings = async (values: UserConfigInterface) => { try { const { data: {data} } = await updateUser(values); setUserConfig({ ...data }); } catch (error) { console.log('settings form error: ', error); } } This works perfectly...I just dont want the function in my component as most of my components are getting way bigger than they need to be. I have tried making a separate file to do this but I can only use the recoil hooks (in this instance useSetRecoilState) inside of components and it just complains when I try and do this outside of a react component. I have tried implementing this with recoils selector and selectorFamily functions but it gets kind of complicated. 
Here is how I have tried it inside of a file that has atoms / selectors only: export const languageProgress = atom<LanguageProgress>({ key: "LanguageProgress", default: { level: 1, xp: 0, max_xp: 0 } }) export const languageProgressUpdate = selectorFamily<LanguageProgress>({ key: "LanguageProgress", get: () => async () => { try { const { data: { data } } = await getLanguageProgress(); return data; } catch (error) { console.log('get language progress error'); } }, set: (params:object) => async ({set}) => { try { const { data: { data } } = await updateLanguageProgress(params); set(languageProgress, { level: data.level, xp: data.xp, max_xp: data.max_xp }); } catch (error) { console.log('language progress update error: ', error); } } }); What I want to do here is get the values I need from the back end and display it in the front which I can do in the selector function get but now I have 2 points of truth for this...my languageProgress atom will initially be incorrect as its not getting anything from the database so I have to use useGetRevoilValue on the languageProgressUpdate selector I have made but then when I want to update I am updating the atom and not the actual value. I cannot find a good example anywhere that does what I am trying to here (very suprisingly as I would have thought it is quite a common way to do things...get data from back end and set it in state.) and I can't figure out a way to do it without doing it in the component (as in the first example). Ideally I would like something like the first example but outside of a component because that solution is super simple and works for me.
[ "So I dont know if this is the best answer but it does work and ultimately what I wanted to do was seperate the logic from the screen component.\nThe answer in my situation is a bit long winded but this is what I used to solve the problem: https://medium.com/geekculture/crud-with-recoiljs-and-remote-api-e36581b77168\nEssentially the answer is to put all the logic into a hook and get state from the api and set it there.\n" ]
[ 0 ]
[]
[]
[ "react_native", "reactjs", "recoiljs", "typescript" ]
stackoverflow_0074634732_react_native_reactjs_recoiljs_typescript.txt
Q: Checking if a list has all the contents of another Let's say I have a list of ints called List<int> List1 = new() {1,2,3,4} and List<int> List2 = new() {2,3,4}. I need to check if List2 has everything List1 has. Keep in mind that list lengths may vary. I have no idea on how to make it. Searching on the internet didnt really help. A: I would advise you to use HashSet for the first set of values and just iterate over the second one to check whether the iterator value exists in the created HashSet or not: var set = new HashSet<int>(list1); var result = list2.All(item => set.Contains(item)); If you don't want to use HashSet, you can do the same thing with LINQ, but with worse performance: var result = list2.All(il2 => list1.Any(il1 => il1 == il2)); or var result = !list2.Except(list1).Any(); A: Here is the simple code, so you need to check whether ls2 is a subset of ls1. bool isSubset = !ls2.Except(ls1).Any();
Checking if a list has all the contents of another
Let's say I have a list of ints called List<int> List1 = new() {1,2,3,4} and List<int> List2 = new() {2,3,4}. I need to check if List2 has everything List1 has. Keep in mind that list lengths may vary. I have no idea how to do it. Searching on the internet didn't really help.
[ "I would advise you to use HashSet for the first set of values and just iterate over the second one to check whether the iterator value exists in the created HashSet or not:\nvar set = new HashSet<int>(list1);\nvar result = list2.All(item => set.Contains(item));\n\nIf you don't want to use HashSet, you can do the same thing with LINQ, but with worse performance:\nvar result = list2.All(il2 => list1.Any(il1 => il1 == il2));\n\nor\nvar result = !list2.Except(list1).Any();\n\n", "Here is the simple code, so you need to check whether ls2 is a subset of ls1.\nbool isSubset = !ls2.Except(ls1).Any();\n\n" ]
[ 1, 0 ]
[]
[]
[ "c#", "integer", "list" ]
stackoverflow_0074674695_c#_integer_list.txt
Q: Vaadin 23: Howto open dialog in value change listener and execute button click? Context: In a Vaadin 23.2.8 form there is a TextField and a Button. What I want to do / expected behavior: In the ValueChangeListener of the TextField there should open a dialog. The dialog should be visible until the user closes it. The button should execute in the background (or after the user closes the dialog, which would also be fine). Unexpected behavior: When a user types something into the TextField and clicks at the button, then the dialog pops up and vanishes after a fraction of a second. And the button is not executed. What does work: When the user types something into the TextField, then leaves the TextField (by clicking somewhere outside the TextField) and then clicks the button, everything works as expected. Code / Small reproducible example: @Route("sandbox") public class SandboxView extends VerticalLayout { public SandboxView() { TextField textfield = new TextField("1. Type something"); textfield.addValueChangeListener(event -> { new Dialog(new Text("Some text in dialog")).open(); }); this.add(textfield); Button button = new Button("2. Click me"); button.addThemeVariants(ButtonVariant.LUMO_PRIMARY); button.setDisableOnClick(true); button.addClickListener(event -> { System.out.println("Button was clicked"); button.setEnabled(true); }); this.add(button); } } Questions: Is it forbidden to open a Dialog in a ValueChangeListener in Vaadin? What can I do to get the expected behavior? A: This happens because dialogs are by default configured to close when the user clicks outside the dialog. You can change this using the setCloseOnOutsideClick method. A: The approach that works for me now is to show a Notification instead of a Dialog. When opening a Dialog (even with dialog.setCloseOnOutsideClick(false) - a good hint of Leif), some other side effects occurred sometimes: E.g. 
the Button ClickListener didn't execute or even froze (because of setDisableOnClick(true) the Button was disabled, but the ClickListener wasn't called and therefore it couldn't re-enable the Button by calling setEnabled(true)). Also some other UI events didn't happen reliably when opening a Dialog, e.g. opening a Details element. Unfortunately I couldn't reproduce these effects in a small code example. With a Notification none of these side effects occurred.
Vaadin 23: Howto open dialog in value change listener and execute button click?
Context: In a Vaadin 23.2.8 form there is a TextField and a Button. What I want to do / expected behavior: In the ValueChangeListener of the TextField there should open a dialog. The dialog should be visible until the user closes it. The button should execute in the background (or after the user closes the dialog, which would also be fine). Unexpected behavior: When a user types something into the TextField and clicks at the button, then the dialog pops up and vanishes after a fraction of a second. And the button is not executed. What does work: When the user types something into the TextField, then leaves the TextField (by clicking somewhere outside the TextField) and then clicks the button, everything works as expected. Code / Small reproducible example: @Route("sandbox") public class SandboxView extends VerticalLayout { public SandboxView() { TextField textfield = new TextField("1. Type something"); textfield.addValueChangeListener(event -> { new Dialog(new Text("Some text in dialog")).open(); }); this.add(textfield); Button button = new Button("2. Click me"); button.addThemeVariants(ButtonVariant.LUMO_PRIMARY); button.setDisableOnClick(true); button.addClickListener(event -> { System.out.println("Button was clicked"); button.setEnabled(true); }); this.add(button); } } Questions: Is it forbidden to open a Dialog in a ValueChangeListener in Vaadin? What can I do to get the expected behavior?
[ "This happens because dialogs are by default configured to close when the user clicks outside the dialog. You can change this using the setCloseOnOutsideClick method.\n", "The approach that works for me now is to show a Notification instead of a Dialog.\nWhen opening a Dialog (even with dialog.setCloseOnOutsideClick(false) - a good hint of Leif), some other side effects occurred sometimes:\n\nE.g. the Button ClickListener didn't execute or even froze (because of setDisableOnClick(true) the Button was disabled but the ClickListener wasn't called and therefore it couldn't reenable the Button by calling setEnabled(true)).\nAlso some other UI events didn't happen reliable when opening a Dialog, e.g. opening a Details element.\n\nUnfortunately I couldn't reproduce these effects in a small code example.\nWith a Notification none of these side effects occurred.\n" ]
[ 1, 0 ]
[]
[]
[ "vaadin", "vaadin23", "vaadin_flow" ]
stackoverflow_0074588318_vaadin_vaadin23_vaadin_flow.txt
Q: Changing a class value of a class attribute with default 0 through instance value I am working with a certain script that calculates discount, where its default is 0, hwoever special items have varied discounts, and my challenge is that I am unable top update the discount. Here's a sample code: class Person(): def __init__(self, item, quantity, money,discount=0): self.discount=discount self.item=item self.quantity=quantity self.money=money if self.money < quantity*1000: print('Not enough money') else: self.quantity=quantity if discount == 0: self.money=self.money-self.quantity*1000 else: self.money=self.money-self.quantity*1000*(1-discount) class Privilage(Person): def __init__(self, item, quantity, money, tag): super().__init__(item, quantity, money,) self.tag=tag if self.tag == 'vip': self.discount=0.1 elif self.tag == 'vvip': self.discount=0.2 else: self.discount=0 I tried changing the numbers and checking outputs by printing self.money, but they always pass trhough discount == 0 instead on the else, whcihc should carry over the discount by class Privilage. I also tried adding other methods, and it works, it simply won't pass in the class Person. A: I think your problem here is that you are trying to define the attributes of the superclass Person by the subclass Privilage. The subclass will inherit any attributes and methods from the superclass, but not vice versa. A solution would be to move the if-else loop from Person to the Privilage class and then it works. 
class Person(): def __init__(self, item, quantity, money,discount=0): self.discount=discount self.item=item self.quantity=quantity self.money=money class Privilage(Person): def __init__(self, item, quantity, money, tag): super().__init__(item, quantity, money,) self.tag=tag # if loop to determine discount status if self.tag == 'vip': self.discount=0.1 elif self.tag == 'vvip': self.discount=0.2 else: self.discount=0 # if loop to check money with discount status if self.money < quantity*1000: print('Not enough money') else: self.quantity=quantity if self.discount == 0: self.money=self.money-self.quantity*1000 else: self.money=self.money-self.quantity*1000*(1-self.discount) print('-------------------------------------------------------') bob = Privilage(item='jacket', quantity=4, money=50000, tag='vip') print("Bob has:", bob.discount, bob.money) sue = Privilage(item='jacket', quantity=5, money=4000, tag=0) print("Sue has:", sue.discount, sue.money) john = Privilage(item='jacket', quantity=10, money=100000, tag='vvip') print("John has:", john.discount, john.money) Resulting output: ------------------------------------------------------- Bob has: 0.1 46400.0 Not enough money Sue has: 0 4000 John has: 0.2 92000.0
Changing a class value of a class attribute with default 0 through instance value
I am working with a certain script that calculates discount, where its default is 0, hwoever special items have varied discounts, and my challenge is that I am unable top update the discount. Here's a sample code: class Person(): def __init__(self, item, quantity, money,discount=0): self.discount=discount self.item=item self.quantity=quantity self.money=money if self.money < quantity*1000: print('Not enough money') else: self.quantity=quantity if discount == 0: self.money=self.money-self.quantity*1000 else: self.money=self.money-self.quantity*1000*(1-discount) class Privilage(Person): def __init__(self, item, quantity, money, tag): super().__init__(item, quantity, money,) self.tag=tag if self.tag == 'vip': self.discount=0.1 elif self.tag == 'vvip': self.discount=0.2 else: self.discount=0 I tried changing the numbers and checking outputs by printing self.money, but they always pass trhough discount == 0 instead on the else, whcihc should carry over the discount by class Privilage. I also tried adding other methods, and it works, it simply won't pass in the class Person.
[ "I think your problem here is that you are trying to define the attributes of the superclass Person by the subclass Privilage. The subclass will inherit any attributes and methods from the superclass, but not vice versa.\nA solution would be to move the if-else loop from Person to the Privilage class and then it works.\nclass Person():\n def __init__(self, item, quantity, money,discount=0):\n self.discount=discount\n self.item=item\n self.quantity=quantity\n self.money=money\n \nclass Privilage(Person):\n def __init__(self, item, quantity, money, tag):\n super().__init__(item, quantity, money,)\n self.tag=tag\n \n # if loop to determine discount status\n if self.tag == 'vip':\n self.discount=0.1\n elif self.tag == 'vvip':\n self.discount=0.2\n else:\n self.discount=0\n \n # if loop to check money with discount status\n if self.money < quantity*1000:\n print('Not enough money')\n else:\n self.quantity=quantity\n if self.discount == 0:\n self.money=self.money-self.quantity*1000\n else:\n self.money=self.money-self.quantity*1000*(1-self.discount)\n \n\nprint('-------------------------------------------------------')\nbob = Privilage(item='jacket', quantity=4, money=50000, tag='vip')\nprint(\"Bob has:\", bob.discount, bob.money)\n\nsue = Privilage(item='jacket', quantity=5, money=4000, tag=0)\nprint(\"Sue has:\", sue.discount, sue.money)\n\njohn = Privilage(item='jacket', quantity=10, money=100000, tag='vvip')\nprint(\"John has:\", john.discount, john.money)\n\nResulting output:\n-------------------------------------------------------\nBob has: 0.1 46400.0\nNot enough money\nSue has: 0 4000\nJohn has: 0.2 92000.0\n\n" ]
[ 0 ]
[]
[]
[ "class", "inheritance", "methods", "oop", "python" ]
stackoverflow_0074674186_class_inheritance_methods_oop_python.txt
Q: Java compiled with newer JDK does not run in older Java I wrote a fairly simple spring boot application in Java. I am on Windows, using latest intelliJ, Maven, and JDK 17. Have configured my compiler source and target to Java 11 My understanding is that I should be able to run this app in Java 11. But when I run in Java 11, I get error below: Error: LinkageError occurred while loading main class com.my.org.application.tester.Runner java.lang.UnsupportedClassVersionError: org/springframework/boot/CommandLineRunner has been compiled by a more recent version of the Java Runtime (class file version 61.0), this version of the Java Runtime only recognizes class file versions up to 55.0 I tried using Java 11 to compile but get this error when I compile: org.apache.maven.lifecycle.LifecycleExecutionException: Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.1:compile (default-compile) on project passport-tester: Compilation failure I am consuming spring-boot-autoconfigure 3.0.0. Have confirmed my jdk settings are correct. When running in JDK 11 I have set JAVA_HOME, path, CLASSPATH to point to JDK11 folder Is there any other setting I am missing, or should I check to make this work? A: From https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-3.0-Release-Notes : Spring Boot 3.0 requires Java 17 as a minimum version.
Java compiled with newer JDK does not run in older Java
I wrote a fairly simple spring boot application in Java. I am on Windows, using latest intelliJ, Maven, and JDK 17. Have configured my compiler source and target to Java 11 My understanding is that I should be able to run this app in Java 11. But when I run in Java 11, I get error below: Error: LinkageError occurred while loading main class com.my.org.application.tester.Runner java.lang.UnsupportedClassVersionError: org/springframework/boot/CommandLineRunner has been compiled by a more recent version of the Java Runtime (class file version 61.0), this version of the Java Runtime only recognizes class file versions up to 55.0 I tried using Java 11 to compile but get this error when I compile: org.apache.maven.lifecycle.LifecycleExecutionException: Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.1:compile (default-compile) on project passport-tester: Compilation failure I am consuming spring-boot-autoconfigure 3.0.0. Have confirmed my jdk settings are correct. When running in JDK 11 I have set JAVA_HOME, path, CLASSPATH to point to JDK11 folder Is there any other setting I am missing, or should I check to make this work?
[ "From https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-3.0-Release-Notes :\n\nSpring Boot 3.0 requires Java 17 as a minimum version.\n\n" ]
[ 0 ]
[]
[]
[ "intellij_idea", "java", "java_11", "java_17", "maven" ]
stackoverflow_0074663344_intellij_idea_java_java_11_java_17_maven.txt
Q: building AOSP 13 error:"bionic-benchmarks-glibc" depends on undefined module "libtinyxml2" AILED: out/soong/build.ninja cd "$(dirname "out/host/linux-x86/bin/soong_build")" && BUILDER="$PWD/$(basename "out/host/linux-x86/bin/soong_build")" && cd / && env -i "$BUILDER" --top "$TOP" --soong_ou t "out/soong" --out "out" -o out/soong/build.ninja --globListDir build --globFile out/soong/globs-build.ninja -t -l out/.module_paths/Android.bp.list --available_env out/soo ng/soong.environment.available --used_env out/soong/soong.environment.used.build Android.bp error: bionic/tests/Android.bp:340:1: "libBionicStandardTests" depends on undefined module "libtinyxml2" error: bionic/benchmarks/Android.bp:123:1: "bionic-benchmarks-glibc" depends on undefined module "libtinyxml2" 10:35:47 soong bootstrap failed with: exit status 1 A: You are missing the repo tinyxml2. Get it from here https://android.googlesource.com/platform/external/tinyxml2/
building AOSP 13 error:"bionic-benchmarks-glibc" depends on undefined module "libtinyxml2"
AILED: out/soong/build.ninja cd "$(dirname "out/host/linux-x86/bin/soong_build")" && BUILDER="$PWD/$(basename "out/host/linux-x86/bin/soong_build")" && cd / && env -i "$BUILDER" --top "$TOP" --soong_ou t "out/soong" --out "out" -o out/soong/build.ninja --globListDir build --globFile out/soong/globs-build.ninja -t -l out/.module_paths/Android.bp.list --available_env out/soo ng/soong.environment.available --used_env out/soong/soong.environment.used.build Android.bp error: bionic/tests/Android.bp:340:1: "libBionicStandardTests" depends on undefined module "libtinyxml2" error: bionic/benchmarks/Android.bp:123:1: "bionic-benchmarks-glibc" depends on undefined module "libtinyxml2" 10:35:47 soong bootstrap failed with: exit status 1
[ "You are missing the repo tinyxml2.\nGet it from here\nhttps://android.googlesource.com/platform/external/tinyxml2/\n" ]
[ 0 ]
[]
[]
[ "android", "android_source", "linux", "node_modules" ]
stackoverflow_0073808811_android_android_source_linux_node_modules.txt
Q: ursina loads frameanimation3d in the wrong order so this is the code loading the anim ` hand = FrameAnimation3d('assets\playerhand\hanim2\\cube_', position=(5, 2, 5), scale = 0.1, loop = False, autoplay = False, fps = 100, ) ` and this is how it loads read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_1.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_10.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_11.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_12.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_13.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_14.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_15.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_16.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_17.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_18.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_19.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_2.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_20.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_21.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_22.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_23.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_24.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_25.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_26.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_27.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_28.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_29.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_3.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_30.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_31.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_32.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_33.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_34.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_35.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_36.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_37.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_38.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_39.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_4.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_40.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_41.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_42.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_43.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_44.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_45.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_46.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_47.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_48.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_49.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_5.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_50.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_51.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_52.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_53.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_54.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_55.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_56.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_57.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_58.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_59.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_6.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_60.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_61.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_62.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_63.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_64.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_65.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_66.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_67.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_68.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_69.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_7.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_70.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_71.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_72.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_73.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_74.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_75.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_76.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_77.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_78.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_79.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_8.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_80.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_9.obj A: ok so i actually got some kind of answer which is to delete first single digit frames but its not that good because u have to delete them but it loads nice so ye
ursina loads frameanimation3d in the wrong order
so this is the code loading the anim ` hand = FrameAnimation3d('assets\playerhand\hanim2\\cube_', position=(5, 2, 5), scale = 0.1, loop = False, autoplay = False, fps = 100, ) ` and this is how it loads read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_1.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_10.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_11.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_12.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_13.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_14.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_15.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_16.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_17.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_18.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_19.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_2.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_20.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_21.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_22.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_23.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_24.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_25.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_26.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_27.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_28.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_29.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_3.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_30.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_31.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_32.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_33.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_34.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_35.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_36.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_37.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_38.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_39.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_4.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_40.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_41.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_42.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_43.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_44.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_45.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_46.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_47.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_48.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_49.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_5.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_50.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_51.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_52.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_53.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_54.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_55.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_56.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_57.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_58.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_59.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_6.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_60.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_61.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_62.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_63.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_64.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_65.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_66.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_67.obj read obj at: 
C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_68.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_69.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_7.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_70.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_71.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_72.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_73.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_74.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_75.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_76.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_77.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_78.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_79.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_8.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_80.obj read obj at: C:\Users\l0g1c0n\PycharmProjects\game\assets\playerhand\hanim2\cube_9.obj
[ "ok so i actually got some kind of answer which is to delete first single digit frames but its not that good because u have to delete them\nbut it loads nice so ye\n" ]
[ 0 ]
[]
[]
[ "animation", "python_3.x", "ursina" ]
stackoverflow_0074674711_animation_python_3.x_ursina.txt
Q: SQL query to filter on group of related rows I have a recurring problem in SQL queries, that I haven't been able to solve elegantly, neither in raw SQL or the Django ORM, and now I'm faced with it in EntityFramework as well. It is probably common enough to have its own name, but I don't know it. Say, I have a simple foreign key relationship between two tables, e.g. Book 1 <- * Tag A book has many tags and a tag has one book, i.e. the Tag table has a foreign key to the book table. Now, I want to find all books that have "Tag1" and "Tag2". Raw SQL I can make multiple joins SELECT * FROM books JOIN tags t1 on tags.book_id = books.id JOIN tags t2 on tags.book_id = books.id WHERE t1.tag = 'Tag1' AND t2.tag = 'Tag2' Cool, that works, but doesn't really seem performant Django In django, I could do something similar Book.objects.filter(tags__tag="Tag1").filter(tags__tag="Tag1") Changing filters like that will cause the extra joins, like in the raw SQL version EntityFramework LINQ I tried chaining .Where() similar to changing Django's .filter(), but that does not have the same result. It will build a query resembling the following, which will of course return nothing, because there is no row where the tag are two different strings SELECT * FROM books JOIN tags t1 on tags.book_id = books.id WHERE t1.tag = 'Tag1' AND t1.tag = 'Tag2' Wrapping it up I suppose I could do an array aggregate to aggregate tags into and array and compare to that, but that seems expensive too, and aggregates and grouping also have impact on the ordering of things, which forces me to do subqueries to get the order I want. I am by no means an expert in SQL, as you can plainly see, but I guess what I am hoping for is either A way to mimic the stupid nonsense above in LINQ An alternative, more elegant approach that will let me do what I need and which works well with any ORM Extra ramblings This case where I need to find books that have "all of" a list of tags is the tricky bit... 
If it was "any of" or "this particular one", then it would be simple. EDIT: The solution using arrays and overlap In Postgres, we can do array_agg to aggregate all related tags into an array, like this: SELECT * FROM books JOIN tags t1 on tags.book_id = books.id ; +--------+-------+------+ | BookId | Name | Tag | +--------+-------+------+ | 1 | BookA | Tag1 | | 1 | BookA | Tag2 | | 1 | BookA | Tag3 | | 2 | BookB | Tag1 | | 2 | BookB | Tag3 | +--------+-------+------+ SELECT books.BookId, Name, array_agg(t1.tags) as tags FROM books JOIN tags t1 on tags.book_id = books.id GROUP BY BookId ORDER BY BookId ; +--------+-------+--------------------+ | BookId | Name | tags | +--------+-------+--------------------+ | 1 | BookA | [Tag1, Tag2, Tag3} | | 2 | BookB | {Tag1, Tag3} | +--------+-------+--------------------+ With that, I can then use the array "contains" operator to find the row where tag overlaps with the expected set: WHERE tags @> ('Tag1', 'Tag2'). This is also a viable option. It does aggregation instead of excessive joining. Not sure what that would look like with LINQ query though A: With group by and having, we can do Tag 1 AND Tag 2: with cte_tags as ( select book_id from tags where tag in ('Tag 1', 'Tag 2') group by book_id having count(*)=2) select b.id as book_id, b.name from books b join cte_tags t on b.id = t.book_id; EDIT: If tag in tags for a book_id can be duplicated, use the following: with cte_tags as ( select book_id from tags where tag in ('Tag 1', 'Tag 2') group by book_id having count(distinct tag)=2) select b.id as book_id, b.name from books b join cte_tags t on b.id = t.book_id; If looking for tag1 OR tag2, or tag1 AND tag2, use the following: with cte_tags as ( select book_id from tags where tag in ('Tag 1', 'Tag 2') group by book_id having count(distinct tag) between 1 and 2) select b.id as book_id, b.name from books b join cte_tags t on b.id = t.book_id; A: If I understand it correctly you want books that only have tags 'Tag1' and 'Tag2'. 
I.e. no other tags. I'm not aware of an official name for this problem, maybe exclusive contains. It amounts to finding books meeting two conditions: have all tags in ("Tag1", "Tag2") have two unique tags (or don't have other or zero tags) You're looking for a solution that "works with any ORM". Of course, that's impossible. However, there is a LINQ solution that will probably work with any LINQ-based ORM. It works with EF anyway. This query can be used when the tag names are not unique per book: var tags = new[] { "Tag1", "Tag2" }; var books = context.Books .Where(b => b.Tags.All(t => tags.Contains(t.Tag)) && b.Tags.Select(t => t.Tag).Distinct().Count() == tags.Count()); This, if the tag names are unique per book (which I assume): var books = context.Books .Where(b => b.Tags.All(t => tags.Contains(t.Tag)) && b.Tags.Count() > 0); The second condition is necessary, otherwise books without any tags would also be selected (that's the semantics of All). This generates SQL queries that you'll have to settle with (as with any LINQ-based ORM). For the second case, using EF-core 6: SELECT [b].[Id] FROM [Books] AS [b] WHERE NOT EXISTS ( SELECT 1 FROM [Tags] AS [t] WHERE ([b].[Id] = [t].[BookId]) AND [t].[Tag] NOT IN (N'Tag1', N'Tag2')) AND (( SELECT COUNT(*) FROM [Tags] AS [t0] WHERE [b].[Id] = [t0].[BookId]) > 0) Of course it's possible to write SQL queries manually that do a (far) better job, performance-wise, in a specific constellation of indexes and statistics. Also, most (if not all) ORMs will offer a way to execute raw SQL. In that sense, a SQL query can be used with "any" ORM. I don't consider that an "ORM solution" though. It doesn't use the ORM's core machinery, merely its database connection. A: If performance is important, you should try various queries on your server with actual data and measure their performance. I have a general note. 
A query like this: select book_id from tags where tag in ('Tag1', 'Tag2') or like this: select book_id from tags where tag = 'Tag 1' OR tag = 'Tag2' usually would result in the scan of the whole table tags even if it had index on the tag column. On the other hand, a query like this: select book_id from tags where tag = 'Tag1' would usually use an index. So, we can expand a query with OR into two separate queries and then combine their results. WITH CTE_BookIDs AS ( select book_id from tags where tag = 'Tag1' INTERSECT select book_id from tags where tag = 'Tag2' ) SELECT books.* FROM books INNER JOIN CTE_BookIDs ON CTE_BookIDs.book_id = books.id ; Here is a query over a sample data set: CREATE TABLE #Tags (ID int IDENTITY NOT NULL PRIMARY KEY ,BookID int NOT NULL ,Tag varchar(50) NOT NULL); INSERT INTO #Tags VALUES (1, 'Tag1'), (1, 'Tag2'), (1, 'Tag3'), (1, 'Tag4'), (2, 'Tag1'), (3, 'Tag2'), (4, 'Tag1'), (4, 'Tag2'), (4, 'Tag3'), (5, 'Tag3'), (5, 'Tag4'), (5, 'Tag5'), (6, 'Tag1'), (6, 'Tag3'), (6, 'Tag5'), (7, 'Tag2'), (7, 'Tag3'), (8, 'Tag1'), (8, 'Tag2'); CREATE INDEX IX_Tag ON #Tags ( Tag, BookID ); WITH CTE_BookIDs AS ( select BookID from #Tags where tag = 'Tag1' INTERSECT select BookID from #Tags where tag = 'Tag2' ) SELECT * FROM CTE_BookIDs ; DROP TABLE #Tags; Result +--------+ | BookID | +--------+ | 1 | | 4 | | 8 | +--------+ Execution plan A: Try next solution: First create index for speed up this query create index on tags (tag, book_id); Second, check next query SELECT * FROM books JOIN tags t1 on t1.tag = 'Tag1' AND t2.book_id = books.id JOIN tags t2 on t2.tag = 'Tag2' AND t2.book_id = books.id; If you want to deal with arbitrary list of any number of tags: SELECT books.id, count(distinct tags.id) as tags_count FROM books JOIN tags on tags.tag = ANY(['Tag1', 'Tag2', ...]) AND tags.book_id = books.id GROUP BY books.id HAVING count(distinct tags.id) = <number of tags> For speed up second query check if next index wil help create index on tags (book_id, 
tag);
SQL query to filter on group of related rows
I have a recurring problem in SQL queries, that I haven't been able to solve elegantly, neither in raw SQL or the Django ORM, and now I'm faced with it in EntityFramework as well. It is probably common enough to have its own name, but I don't know it. Say, I have a simple foreign key relationship between two tables, e.g. Book 1 <- * Tag A book has many tags and a tag has one book, i.e. the Tag table has a foreign key to the book table. Now, I want to find all books that have "Tag1" and "Tag2". Raw SQL I can make multiple joins SELECT * FROM books JOIN tags t1 on tags.book_id = books.id JOIN tags t2 on tags.book_id = books.id WHERE t1.tag = 'Tag1' AND t2.tag = 'Tag2' Cool, that works, but doesn't really seem performant Django In django, I could do something similar Book.objects.filter(tags__tag="Tag1").filter(tags__tag="Tag1") Changing filters like that will cause the extra joins, like in the raw SQL version EntityFramework LINQ I tried chaining .Where() similar to changing Django's .filter(), but that does not have the same result. It will build a query resembling the following, which will of course return nothing, because there is no row where the tag are two different strings SELECT * FROM books JOIN tags t1 on tags.book_id = books.id WHERE t1.tag = 'Tag1' AND t1.tag = 'Tag2' Wrapping it up I suppose I could do an array aggregate to aggregate tags into and array and compare to that, but that seems expensive too, and aggregates and grouping also have impact on the ordering of things, which forces me to do subqueries to get the order I want. I am by no means an expert in SQL, as you can plainly see, but I guess what I am hoping for is either A way to mimic the stupid nonsense above in LINQ An alternative, more elegant approach that will let me do what I need and which works well with any ORM Extra ramblings This case where I need to find books that have "all of" a list of tags is the tricky bit... 
If it was "any of" or "this particular one", then it would be simple. EDIT: The solution using arrays and overlap In Postgres, we can do array_agg to aggregate all related tags into an array, like this: SELECT * FROM books JOIN tags t1 on tags.book_id = books.id ; +--------+-------+------+ | BookId | Name | Tag | +--------+-------+------+ | 1 | BookA | Tag1 | | 1 | BookA | Tag2 | | 1 | BookA | Tag3 | | 2 | BookB | Tag1 | | 2 | BookB | Tag3 | +--------+-------+------+ SELECT books.BookId, Name, array_agg(t1.tags) as tags FROM books JOIN tags t1 on tags.book_id = books.id GROUP BY BookId ORDER BY BookId ; +--------+-------+--------------------+ | BookId | Name | tags | +--------+-------+--------------------+ | 1 | BookA | [Tag1, Tag2, Tag3} | | 2 | BookB | {Tag1, Tag3} | +--------+-------+--------------------+ With that, I can then use the array "contains" operator to find the row where tag overlaps with the expected set: WHERE tags @> ('Tag1', 'Tag2'). This is also a viable option. It does aggregation instead of excessive joining. Not sure what that would look like with LINQ query though
[ "With group by and having, we can do Tag 1 AND Tag 2:\nwith cte_tags as (\nselect book_id\n from tags\n where tag in ('Tag 1', 'Tag 2')\n group by book_id\n having count(*)=2)\nselect b.id as book_id,\n b.name\n from books b\n join cte_tags t\n on b.id = t.book_id;\n\nEDIT:\n\nIf tag in tags for a book_id can be duplicated, use the following:\n\nwith cte_tags as (\nselect book_id\n from tags\n where tag in ('Tag 1', 'Tag 2')\n group by book_id\n having count(distinct tag)=2)\nselect b.id as book_id,\n b.name\n from books b\n join cte_tags t\n on b.id = t.book_id;\n\n\nIf looking for tag1 OR tag2, or tag1 AND tag2, use the following:\n\nwith cte_tags as (\nselect book_id\n from tags\n where tag in ('Tag 1', 'Tag 2')\n group by book_id\n having count(distinct tag) between 1 and 2)\nselect b.id as book_id,\n b.name\n from books b\n join cte_tags t\n on b.id = t.book_id;\n\n", "If I understand it correctly you want books that only have tags 'Tag1' and 'Tag2'. I.e. no other tags. I'm not aware of an official name for this problem, maybe exclusive contains.\nIt amounts to finding books meeting two conditions:\n\nhave all tags in (\"Tag1\", \"Tag2\")\nhave two unique tags (or don't have other or zero tags)\n\nYou're looking for a solution that \"works with any ORM\". Of course, that's impossible. However, there is a LINQ solution that will probably work with any LINQ-based ORM. 
It works with EF anyway.\nThis query can be used when the tag names are not unique per book:\nvar tags = new[] { \"Tag1\", \"Tag2\" };\nvar books = context.Books\n .Where(b => b.Tags.All(t => tags.Contains(t.Tag))\n && b.Tags.Select(t => t.Tag).Distinct().Count() == tags.Count());\n\nThis, if the tag names are unique per book (which I assume):\nvar books = context.Books\n .Where(b => b.Tags.All(t => tags.Contains(t.Tag))\n && b.Tags.Count() > 0);\n\nThe second condition is necessary, otherwise books without any tags would also be selected (that's the semantics of All).\nThis generates SQL queries that you'll have to settle with (as with any LINQ-based ORM). For the second case, using EF-core 6:\n SELECT [b].[Id]\n FROM [Books] AS [b]\n WHERE NOT EXISTS (\n SELECT 1\n FROM [Tags] AS [t]\n WHERE ([b].[Id] = [t].[BookId]) AND [t].[Tag] NOT IN (N'Tag1', N'Tag2'))\n AND ((\n SELECT COUNT(*)\n FROM [Tags] AS [t0]\n WHERE [b].[Id] = [t0].[BookId]) > 0)\n\nOf course it's possible to write SQL queries manually that do a (far) better job, performance-wise, in a specific constellation of indexes and statistics. Also, most (if not all) ORMs will offer a way to execute raw SQL. In that sense, a SQL query can be used with \"any\" ORM. I don't consider that an \"ORM solution\" though. 
It doesn't use the ORM's core machinery, merely its database connection.\n", "If performance is important, you should try various queries on your server with actual data and measure their performance.\nI have a general note.\nA query like this:\nselect book_id\nfrom tags\nwhere tag in ('Tag1', 'Tag2')\n\nor like this:\nselect book_id\nfrom tags\nwhere tag = 'Tag 1' OR tag = 'Tag2'\n\nusually would result in the scan of the whole table tags even if it had index on the tag column.\nOn the other hand, a query like this:\nselect book_id\nfrom tags\nwhere tag = 'Tag1'\n\nwould usually use an index.\nSo, we can expand a query with OR into two separate queries and then combine their results.\nWITH\nCTE_BookIDs\nAS\n(\n select book_id\n from tags\n where tag = 'Tag1'\n\n INTERSECT\n\n select book_id\n from tags\n where tag = 'Tag2'\n)\nSELECT\n books.*\nFROM\n books\n INNER JOIN CTE_BookIDs ON CTE_BookIDs.book_id = books.id\n;\n\nHere is a query over a sample data set:\nCREATE TABLE #Tags\n (ID int IDENTITY NOT NULL PRIMARY KEY\n ,BookID int NOT NULL\n ,Tag varchar(50) NOT NULL);\n\nINSERT INTO #Tags VALUES\n(1, 'Tag1'),\n(1, 'Tag2'),\n(1, 'Tag3'),\n(1, 'Tag4'),\n(2, 'Tag1'),\n(3, 'Tag2'),\n(4, 'Tag1'),\n(4, 'Tag2'),\n(4, 'Tag3'),\n(5, 'Tag3'),\n(5, 'Tag4'),\n(5, 'Tag5'),\n(6, 'Tag1'),\n(6, 'Tag3'),\n(6, 'Tag5'),\n(7, 'Tag2'),\n(7, 'Tag3'),\n(8, 'Tag1'),\n(8, 'Tag2');\n\nCREATE INDEX IX_Tag ON #Tags\n(\n Tag, BookID\n);\n\nWITH\nCTE_BookIDs\nAS\n(\n select BookID\n from #Tags\n where tag = 'Tag1'\n\n INTERSECT\n\n select BookID\n from #Tags\n where tag = 'Tag2'\n)\nSELECT *\nFROM CTE_BookIDs\n;\n\nDROP TABLE #Tags;\n\nResult\n+--------+\n| BookID |\n+--------+\n| 1 |\n| 4 |\n| 8 |\n+--------+\n\nExecution plan\n\n", "Try next solution:\nFirst create index for speed up this query\ncreate index on tags (tag, book_id);\n\nSecond, check next query\nSELECT * FROM books\nJOIN tags t1 on t1.tag = 'Tag1' AND t2.book_id = books.id\nJOIN tags t2 on t2.tag = 'Tag2' AND t2.book_id = 
books.id;\n\nIf you want to deal with arbitrary list of any number of tags:\nSELECT \n books.id,\n count(distinct tags.id) as tags_count\nFROM books\nJOIN tags on tags.tag = ANY(['Tag1', 'Tag2', ...]) AND tags.book_id = books.id\nGROUP BY books.id\nHAVING \n count(distinct tags.id) = <number of tags>\n\nFor speed up second query check if next index wil help\ncreate index on tags (book_id, tag);\n\n" ]
[ 3, 0, 0, 0 ]
[]
[]
[ "django", "linq", "sql" ]
stackoverflow_0074546712_django_linq_sql.txt
Q: Is there a way to console.log an output only when my loop finishes running and matched no entry In the code below how do i output try again only when the entire code execution runs and customer's order is not found amongst the product order in the menuItem dictionary. I want to output this only when customer input doesn't match any product code let menuItem = { item_1: { name: "french burger", price: 1000, productCode: 101 }, item_2: { name: "chicken sharwama", price: 1500, productCode: 102 }, item_3: { name: "pizza", price: 5000, productCode: 103 }, item_4: { name: "beef sharwama", price: 1500, productCode: 104 }, item_5: { name: "smoothie (mix flavor)", price: 1300, productCode: 105 } } listMenuItem = () => { for (let i in menuItem) { console.log(`Order Code: ${menuItem[i].productCode} || ${menuItem[i].name}, ${menuItem[i].price} NGN \n`) } } listMenuItem() var order = prompt("Enter product code to make your order: ") console.log(order) let customerOrder = [] for (let i in menuItem) { if (menuItem[i].productCode == order) { customerOrder.push(menuItem[i]) console.log(customerOrder) console.log(`${menuItem[i].name}, ${menuItem[i].price}`) } else { console.log("Product does not exist, try again") } } A: Your structure makes it harder to use the array methods. 
The result is more confusing that it would have been if you just had an object keyed on productCode let menuItem = { item_1: { name: "french burger", price: 1000, productCode: 101 }, item_2: { name: "chicken sharwama", price: 1500, productCode: 102 }, item_3: { name: "pizza", price: 5000, productCode: 103 }, item_4: { name: "beef sharwama", price: 1500, productCode: 104 }, item_5: { name: "smoothie (mix flavor)", price: 1300, productCode: 105 } } listMenuItem = () => { for (let i in menuItem) { console.log(`Order Code: ${menuItem[i].productCode} || ${menuItem[i].name}, ${menuItem[i].price} NGN \n`) } } listMenuItem() var order = +prompt("Enter product code to make your order: "); // convert the string to number or make the productCode a string in the object console.log(order) let customerOrder = Object.entries(menuItem).find(([key,{productCode}]) => productCode === order); console.log(customerOrder) if (customerOrder) { console.log(`I found ${customerOrder[1].name}, ${customerOrder[1].price}`) } else { console.log("Product does not exist, try again") }
Is there a way to console.log an output only when my loop finishes running and matched no entry
In the code below how do i output try again only when the entire code execution runs and customer's order is not found amongst the product order in the menuItem dictionary. I want to output this only when customer input doesn't match any product code let menuItem = { item_1: { name: "french burger", price: 1000, productCode: 101 }, item_2: { name: "chicken sharwama", price: 1500, productCode: 102 }, item_3: { name: "pizza", price: 5000, productCode: 103 }, item_4: { name: "beef sharwama", price: 1500, productCode: 104 }, item_5: { name: "smoothie (mix flavor)", price: 1300, productCode: 105 } } listMenuItem = () => { for (let i in menuItem) { console.log(`Order Code: ${menuItem[i].productCode} || ${menuItem[i].name}, ${menuItem[i].price} NGN \n`) } } listMenuItem() var order = prompt("Enter product code to make your order: ") console.log(order) let customerOrder = [] for (let i in menuItem) { if (menuItem[i].productCode == order) { customerOrder.push(menuItem[i]) console.log(customerOrder) console.log(`${menuItem[i].name}, ${menuItem[i].price}`) } else { console.log("Product does not exist, try again") } }
[ "Your structure makes it harder to use the array methods.\nThe result is more confusing that it would have been if you just had an object keyed on productCode\n\n\nlet menuItem = {\n item_1: {\n name: \"french burger\",\n price: 1000,\n productCode: 101\n },\n item_2: {\n name: \"chicken sharwama\",\n price: 1500,\n productCode: 102\n },\n item_3: {\n name: \"pizza\",\n price: 5000,\n productCode: 103\n },\n item_4: {\n name: \"beef sharwama\",\n price: 1500,\n productCode: 104\n },\n item_5: {\n name: \"smoothie (mix flavor)\",\n price: 1300,\n productCode: 105\n }\n}\n\nlistMenuItem = () => {\n for (let i in menuItem) {\n console.log(`Order Code: ${menuItem[i].productCode} || ${menuItem[i].name}, ${menuItem[i].price} NGN \\n`)\n }\n}\n\nlistMenuItem()\nvar order = +prompt(\"Enter product code to make your order: \"); // convert the string to number or make the productCode a string in the object\nconsole.log(order)\nlet customerOrder = Object.entries(menuItem).find(([key,{productCode}]) => productCode === order);\nconsole.log(customerOrder)\nif (customerOrder) {\n console.log(`I found ${customerOrder[1].name}, ${customerOrder[1].price}`)\n}\nelse {\n console.log(\"Product does not exist, try again\")\n}\n\n\n\n" ]
[ 0 ]
[]
[]
[ "javascript" ]
stackoverflow_0074673617_javascript.txt
Q: Bootstrap button drop-down inside responsive table not visible because of scroll I have a problem with drop-down buttons inside tables when are responsive and scroll active because the drop-down is not visible because of overflow: auto; property. How can I fix that in order to show drop-down option of button when this is collapsed? I can use some jQuery but after I have problems with scroll left-right so I decided to find another solution. I have attached a photo to understand better. Here is a small js fiddle: A: I solved myself this and I put the answer in scope to help other user that have same problem: We have an event in bootstrap and we can use that event to set overflow: inherit but this will work if you don't have the css property on your parent container. $('.table-responsive').on('show.bs.dropdown', function () { $('.table-responsive').css( "overflow", "inherit" ); }); $('.table-responsive').on('hide.bs.dropdown', function () { $('.table-responsive').css( "overflow", "auto" ); }) and this is the fiddle info: In this fiddle example works strange and I'm not sure why but in my project works just fine. A: A CSS only solution is to allow the y-axis to overflow. http://www.bootply.com/YvePJTDzI0 .table-responsive { overflow-y: visible !important; } EDIT Another CSS only solution is to responsively apply the overflow based on viewport width: @media (max-width: 767px) { .table-responsive .dropdown-menu { position: static !important; } } @media (min-width: 768px) { .table-responsive { overflow: inherit; } } https://www.codeply.com/go/D3XBvspns4 A: For reference, it's 2018 and I'm using BS4.1 Try adding data-boundary="viewport" to the button that toggles the dropdown (the one with the class dropdown-toggle). 
See https://getbootstrap.com/docs/4.1/components/dropdowns/#options A: I'd took a different approach, I had detached the element from the parent and set it with position absolute by jQuery Working JS fidle: http://jsfiddle.net/s270Lyrd/ The JS solution I am using. //fix menu overflow under the responsive table // hide menu on click... (This is a must because when we open a menu ) $(document).click(function (event) { //hide all our dropdowns $('.dropdown-menu[data-parent]').hide(); }); $(document).on('click', '.table-responsive [data-toggle="dropdown"]', function () { // if the button is inside a modal if ($('body').hasClass('modal-open')) { throw new Error("This solution is not working inside a responsive table inside a modal, you need to find out a way to calculate the modal Z-index and add it to the element") return true; } $buttonGroup = $(this).parent(); if (!$buttonGroup.attr('data-attachedUl')) { var ts = +new Date; $ul = $(this).siblings('ul'); $ul.attr('data-parent', ts); $buttonGroup.attr('data-attachedUl', ts); $(window).resize(function () { $ul.css('display', 'none').data('top'); }); } else { $ul = $('[data-parent=' + $buttonGroup.attr('data-attachedUl') + ']'); } if (!$buttonGroup.hasClass('open')) { $ul.css('display', 'none'); return; } dropDownFixPosition($(this).parent(), $ul); function dropDownFixPosition(button, dropdown) { var dropDownTop = button.offset().top + button.outerHeight(); dropdown.css('top', dropDownTop + "px"); dropdown.css('left', button.offset().left + "px"); dropdown.css('position', "absolute"); dropdown.css('width', dropdown.width()); dropdown.css('heigt', dropdown.height()); dropdown.css('display', 'block'); dropdown.appendTo('body'); } }); A: This solution worked great for me : @media (max-width: 767px) { .table-responsive .dropdown-menu { position: static !important; } } @media (min-width: 768px) { .table-responsive { overflow: visible; } } More detail: https://github.com/twbs/bootstrap/issues/15374 A: Define this properties. 
Good Luck! data-toggle="dropdown" data-boundary="window" A: my 2¢ quick global fix: // drop down in responsive table (function () { $('.table-responsive').on('shown.bs.dropdown', function (e) { var $table = $(this), $menu = $(e.target).find('.dropdown-menu'), tableOffsetHeight = $table.offset().top + $table.height(), menuOffsetHeight = $menu.offset().top + $menu.outerHeight(true); if (menuOffsetHeight > tableOffsetHeight) $table.css("padding-bottom", menuOffsetHeight - tableOffsetHeight); }); $('.table-responsive').on('hide.bs.dropdown', function () { $(this).css("padding-bottom", 0); }) })(); Explications: When a dropdown-menu inside a '.table-responsive' is shown, it calculate the height of the table and expand it (with padding) to match the height required to display the menu. The menu can be any size. In my case, this is not the table that has the '.table-responsive' class, it's a wrapping div: <div class="table-responsive" style="overflow:auto;"> <table class="table table-hover table-bordered table-condensed server-sort"> So the $table var in the script is actually a div! (just to be clear... or not) :) Note: I wrap it in a function so my IDE can collapse function ;) but it's not mandatory! A: I have a solution using only CSS, just use position relative for dropdowns inside the table-responsive: @media (max-width: 767px) { .table-responsive .dropdown-menu { position: relative; /* Sometimes needs !important */ } } https://codepen.io/leocaseiro/full/rKxmpz/ A: This has been fixed in Bootstrap v4.1 and above by adding data-boundary="viewport" (Bootstrap Dropdowns Docs) But for earlier versions (v4.0 and below), I found this javascript snippet that works perfectly. It works for small tables and scrolling tables: $('.table-responsive').on('shown.bs.dropdown', function (e) { var t = $(this), m = $(e.target).find('.dropdown-menu'), tb = t.offset().top + t.height(), mb = m.offset().top + m.outerHeight(true), d = 20; // Space for shadow + scrollbar. 
if (t[0].scrollWidth > t.innerWidth()) { if (mb + d > tb) { t.css('padding-bottom', ((mb + d) - tb)); } } else { t.css('overflow', 'visible'); } }).on('hidden.bs.dropdown', function () { $(this).css({'padding-bottom': '', 'overflow': ''}); }); A: Cleaned up @Wazime solution a little. Works great as a general solution. $(document).on('shown.bs.dropdown', '.table-responsive', function (e) { // The .dropdown container var $container = $(e.target); // Find the actual .dropdown-menu var $dropdown = $container.find('.dropdown-menu'); if ($dropdown.length) { // Save a reference to it, so we can find it after we've attached it to the body $container.data('dropdown-menu', $dropdown); } else { $dropdown = $container.data('dropdown-menu'); } $dropdown.css('top', ($container.offset().top + $container.outerHeight()) + 'px'); $dropdown.css('left', $container.offset().left + 'px'); $dropdown.css('position', 'absolute'); $dropdown.css('display', 'block'); $dropdown.appendTo('body'); }); $(document).on('hide.bs.dropdown', '.table-responsive', function (e) { // Hide the dropdown menu bound to this button $(e.target).data('dropdown-menu').css('display', 'none'); }); A: Bootstrap 5 Solution This is what worked best for me: .table-responsive .dropdown, .table-responsive .btn-group, .table-responsive .btn-group-vertical { position: static; } A: Try it once. after 1 hour of research on net I found Best Solution for this Problem. 
Solution:- just add script (function () { // hold onto the drop down menu var dropdownMenu; // and when you show it, move it to the body $(window).on('show.bs.dropdown', function (e) { // grab the menu dropdownMenu = $(e.target).find('.dropdown-menu'); // detach it and append it to the body $('body').append(dropdownMenu.detach()); // grab the new offset position var eOffset = $(e.target).offset(); // make sure to place it where it would normally go (this could be improved) dropdownMenu.css({ 'display': 'block', 'top': eOffset.top + $(e.target).outerHeight(), 'left': eOffset.left }); }); // and when you hide it, reattach the drop down, and hide it normally $(window).on('hide.bs.dropdown', function (e) { $(e.target).append(dropdownMenu.detach()); dropdownMenu.hide(); }); })(); OUTPUT:- A: SIMPLE css only solution Rather than modifying the parent table, Here I have a simple solution The idea is to add z-index to the <td></td> that holds your dropdown. So that it will be on top of all other elements. <td style="position: absolute; z-index: 10; width: 20%;"></td> A: Burebistaruler response works ok for me on ios8 (iphone4s) but doen't woks on android that before was working. What i've donne that Works for me on ios8 (iphone4s) and andoir is: $('.table-responsive').on('show.bs.dropdown', function () { $('.table-responsive').css( "min-height", "400px" ); }); $('.table-responsive').on('hide.bs.dropdown', function () { $('.table-responsive').css( "min-height", "none" ); }) A: Based on the accepted answer and the answer of @LeoCaseiro here is what I ended up using in my case : @media (max-width: 767px) { .table-responsive{ overflow-x: auto; overflow-y: auto; } } @media (min-width: 767px) { .table-responsive{ overflow: inherit !important; /* Sometimes needs !important */ } } on big screens the dropdown won't be hidden behind the reponsive-table and in small screen it will be hidden but it's ok because there is scrolls bar in mobile anyway. Hope this help someone. 
A: The recommended and chosen solution, is not always the best solution. Unfortunately its the solution linkedin recently used and it creates multiple scrollbars on the page based on the situation. My method was slightly different. I contained the table-responsive div in another div. Then I applied height 100%, width:100%, display block and position absolute so the height and width is based on the page size, and set overflow to hidden. Then on the table responsive div I added a min-height of 100% <div class="table_container" style="height: 100%; width: 100%; display: block;position: absolute;overflow: hidden;"> <div class="table-responsive" style="min-height:100%;"> As you can see in the working example below, no added scroll bars, no funny behavior, and practically as its using percentages - it should work regardless of screen size. I have not testing this for that however. If that fails for some reason, one can replace 100% with 100vh and 100vw respectively. <!-- Latest compiled and minified CSS --> <link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> <!-- Optional theme --> <link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap-theme.min.css"> <script src="https://code.jquery.com/jquery-1.12.4.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> <div class="table_container" style="height: 100%; width: 100%; display: block;position: absolute;overflow: hidden;"> <div class="table-responsive" style="min-height:100%;"> <table class="table"> <thead> <tr> <th>Value1</th> <th>Value2</th> <th>Value3</th> <th>Value4</th> </tr> </thead> <tbody> <tr> <td> DATA <div class="btn-group btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a 
href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td> DATA <div class="btn-group btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td> DATA <div class="btn-group btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td>DATA</td> </tr> <tr> <td> DATA <div class="btn-group btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td> DATA <div class="btn-group btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td> DATA <div class="btn-group 
btn-group-rounded"> <button type="button" class="btn btn-default btn-xs" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false" style="border-radius:3px;"> <span class="caret"></span> </button> <ul class="dropdown-menu"> <li><a href="#">One</a></li> <li><a href="#">Two</a></li> <li><a href="#">Three</a></li> <li role="seperator" class="divider"></li> <li><a href="#">Four</a></li> </ul> </div> </td> <td>DATA</td> </tr> </tbody> </table> </div> </div> A: another solution is .table-responsive{ min-height: 400px; } A: The solution for me was this: .table-responsive { min-height: 300px; } .table-responsive, .table { overflow-y: visible !important; } A: I've done some research and all the answers didn't solve it for me but they did sort-of point me in the right direction. The boundary was already set to "window". My <tbody> had a position: relative;. The dropdown-menu has a position: absolute;, but kept "relative" to the tbody, which caused the problems. I changed the <tbody> to position: static;, which fixed my problem without any JavaScript, and the table is still responsive. I'm using Bootstrap 4 by the way. A: My simple JS solution without change global css rules. Note: '.table-scrollable' you might need replace to '.table-responsive' $('.table-scrollable').on('show.bs.dropdown', function (e) { //get button position offset = $(e.relatedTarget).offset() //get button height heigth = $(e.relatedTarget).outerHeight() //append dropdown to body and perpare position. $(e.relatedTarget).next('.dropdown-menu').addClass('dropdown-menu-in-table').appendTo("body").css({display:'block',top:offset.top+heigth, left: offset.left}); }); //move back dropdown menu to button and remove positon $('body').on('hide.bs.dropdown', function (e) { $(this).find('.dropdown-menu-in-table').removeClass('dropdown-menu-in-table').css({display:'',top:'', left: ''}).appendTo($(e.relatedTarget).parent()); }); A: This could be useful for someone else. I'm using DatatablesJS. 
I add 500px to the current height of the table. I do this because Datatables allow you to use 10, 20, etc pages in your table. So I need to calculate dinamically the table's height. When dropdown is shown, I add extra height. When dropdown is hiden, I reset original table's height. $(document).ready(function() { $('.table-responsive .dropdown').on('shown.bs.dropdown', function () { console.log($('#table-responsive-cliente').height() + 500) $("#table-responsive-cliente").css("height",$('#table-responsive-cliente').height() + 500 ); }) $('.table-responsive .dropdown').on('hide.bs.dropdown', function () { $("#table-responsive-cliente").css("height","auto"); }) }) And the HTML <div class="table-responsive" id="table-responsive-cliente"> <table class="table-striped table-hover"> .... .... </table> </div> Before: After dropdown is shown: A: We solved this issue here at work by applying a .dropup class to the dropdown when the dropdown is close to the bottom of a table.enter image description here A: This worked for me in Bootstrap 4 since it has different breakpoints than v3: @media (min-width: 992px) { .table-responsive { overflow: inherit; } } A: Well, reading the top answer, i saw that it really dont works when you are seeing the scroll bar and the toggle button was on last column (in my case) or other column that is unseen But, if you change 'inherit' for 'hidden' it will work. $('.table-responsive').on('show.bs.dropdown', function () { $('.table-responsive').css( "overflow", "hidden" ); }).on('hide.bs.dropdown', function () { $('.table-responsive').css( "overflow", "auto" ); }) Try to do that way. 
A: Using Bootstrap 5.2, with large tables that need to be responsive, this solution recently posted on Github (Nov 2022) - worked brilliantly for me: I call the following javascript after rendering the drop-downs the first time (Blazor Server): const dropdowns = document.querySelectorAll('.dropdown-toggle') const dropdown = [...dropdowns].map((dropdownToggleEl) => new bootstrap.Dropdown(dropdownToggleEl, { popperConfig(defaultBsPopperConfig) { return { ...defaultBsPopperConfig, strategy: 'fixed' }; } })); Drop-downs can now expand outside of the table-responsive wrapper - without affecting the vertical size of the table or division - and works for both large and small screens.
Bootstrap button drop-down inside responsive table not visible because of scroll
I have a problem with drop-down buttons inside tables when are responsive and scroll active because the drop-down is not visible because of overflow: auto; property. How can I fix that in order to show drop-down option of button when this is collapsed? I can use some jQuery but after I have problems with scroll left-right so I decided to find another solution. I have attached a photo to understand better. Here is a small js fiddle:
[ "I solved myself this and I put the answer in scope to help other user that have same problem: We have an event in bootstrap and we can use that event to set overflow: inherit but this will work if you don't have the css property on your parent container.\n$('.table-responsive').on('show.bs.dropdown', function () {\n $('.table-responsive').css( \"overflow\", \"inherit\" );\n});\n\n$('.table-responsive').on('hide.bs.dropdown', function () {\n $('.table-responsive').css( \"overflow\", \"auto\" );\n})\n\nand this is the fiddle\ninfo: In this fiddle example works strange and I'm not sure why but in my project works just fine.\n", "A CSS only solution is to allow the y-axis to overflow.\nhttp://www.bootply.com/YvePJTDzI0\n.table-responsive {\n overflow-y: visible !important;\n}\n\nEDIT\nAnother CSS only solution is to responsively apply the overflow based on viewport width: \n@media (max-width: 767px) {\n .table-responsive .dropdown-menu {\n position: static !important;\n }\n}\n@media (min-width: 768px) {\n .table-responsive {\n overflow: inherit;\n }\n}\n\nhttps://www.codeply.com/go/D3XBvspns4\n", "For reference, it's 2018 and I'm using BS4.1\nTry adding data-boundary=\"viewport\" to the button that toggles the dropdown (the one with the class dropdown-toggle). See https://getbootstrap.com/docs/4.1/components/dropdowns/#options\n", "I'd took a different approach, I had detached the element from the parent and set it with position absolute by jQuery\nWorking JS fidle:\nhttp://jsfiddle.net/s270Lyrd/ \n\nThe JS solution I am using. \n//fix menu overflow under the responsive table \n// hide menu on click... 
(This is a must because when we open a menu )\n$(document).click(function (event) {\n //hide all our dropdowns\n $('.dropdown-menu[data-parent]').hide();\n\n});\n$(document).on('click', '.table-responsive [data-toggle=\"dropdown\"]', function () {\n // if the button is inside a modal\n if ($('body').hasClass('modal-open')) {\n throw new Error(\"This solution is not working inside a responsive table inside a modal, you need to find out a way to calculate the modal Z-index and add it to the element\")\n return true;\n }\n\n $buttonGroup = $(this).parent();\n if (!$buttonGroup.attr('data-attachedUl')) {\n var ts = +new Date;\n $ul = $(this).siblings('ul');\n $ul.attr('data-parent', ts);\n $buttonGroup.attr('data-attachedUl', ts);\n $(window).resize(function () {\n $ul.css('display', 'none').data('top');\n });\n } else {\n $ul = $('[data-parent=' + $buttonGroup.attr('data-attachedUl') + ']');\n }\n if (!$buttonGroup.hasClass('open')) {\n $ul.css('display', 'none');\n return;\n }\n dropDownFixPosition($(this).parent(), $ul);\n function dropDownFixPosition(button, dropdown) {\n var dropDownTop = button.offset().top + button.outerHeight();\n dropdown.css('top', dropDownTop + \"px\");\n dropdown.css('left', button.offset().left + \"px\");\n dropdown.css('position', \"absolute\");\n\n dropdown.css('width', dropdown.width());\n dropdown.css('heigt', dropdown.height());\n dropdown.css('display', 'block');\n dropdown.appendTo('body');\n }\n});\n\n", "This solution worked great for me :\n@media (max-width: 767px) {\n .table-responsive .dropdown-menu {\n position: static !important;\n }\n}\n@media (min-width: 768px) {\n .table-responsive {\n overflow: visible;\n }\n}\n\nMore detail: https://github.com/twbs/bootstrap/issues/15374\n", "Define this properties. 
Good Luck!\ndata-toggle=\"dropdown\" data-boundary=\"window\"\n\n", "my 2¢ quick global fix:\n// drop down in responsive table\n\n(function () {\n $('.table-responsive').on('shown.bs.dropdown', function (e) {\n var $table = $(this),\n $menu = $(e.target).find('.dropdown-menu'),\n tableOffsetHeight = $table.offset().top + $table.height(),\n menuOffsetHeight = $menu.offset().top + $menu.outerHeight(true);\n\n if (menuOffsetHeight > tableOffsetHeight)\n $table.css(\"padding-bottom\", menuOffsetHeight - tableOffsetHeight);\n });\n\n $('.table-responsive').on('hide.bs.dropdown', function () {\n $(this).css(\"padding-bottom\", 0);\n })\n})();\n\nExplications:\nWhen a dropdown-menu inside a '.table-responsive' is shown, it calculate the height of the table and expand it (with padding) to match the height required to display the menu. The menu can be any size.\nIn my case, this is not the table that has the '.table-responsive' class, it's a wrapping div:\n<div class=\"table-responsive\" style=\"overflow:auto;\">\n <table class=\"table table-hover table-bordered table-condensed server-sort\">\n\nSo the $table var in the script is actually a div! (just to be clear... or not) :)\nNote: I wrap it in a function so my IDE can collapse function ;) but it's not mandatory!\n", "I have a solution using only CSS, just use position relative for dropdowns inside the table-responsive:\n@media (max-width: 767px) {\n .table-responsive .dropdown-menu {\n position: relative; /* Sometimes needs !important */\n }\n}\n\nhttps://codepen.io/leocaseiro/full/rKxmpz/\n", "This has been fixed in Bootstrap v4.1 and above by adding data-boundary=\"viewport\" (Bootstrap Dropdowns Docs)\nBut for earlier versions (v4.0 and below), I found this javascript snippet that works perfectly. 
It works for small tables and scrolling tables:\n$('.table-responsive').on('shown.bs.dropdown', function (e) {\n var t = $(this),\n m = $(e.target).find('.dropdown-menu'),\n tb = t.offset().top + t.height(),\n mb = m.offset().top + m.outerHeight(true),\n d = 20; // Space for shadow + scrollbar.\n if (t[0].scrollWidth > t.innerWidth()) {\n if (mb + d > tb) {\n t.css('padding-bottom', ((mb + d) - tb));\n }\n }\n else {\n t.css('overflow', 'visible');\n }\n}).on('hidden.bs.dropdown', function () {\n $(this).css({'padding-bottom': '', 'overflow': ''});\n});\n\n", "Cleaned up @Wazime solution a little. Works great as a general solution.\n$(document).on('shown.bs.dropdown', '.table-responsive', function (e) {\n // The .dropdown container\n var $container = $(e.target);\n\n // Find the actual .dropdown-menu\n var $dropdown = $container.find('.dropdown-menu');\n if ($dropdown.length) {\n // Save a reference to it, so we can find it after we've attached it to the body\n $container.data('dropdown-menu', $dropdown);\n } else {\n $dropdown = $container.data('dropdown-menu');\n }\n\n $dropdown.css('top', ($container.offset().top + $container.outerHeight()) + 'px');\n $dropdown.css('left', $container.offset().left + 'px');\n $dropdown.css('position', 'absolute');\n $dropdown.css('display', 'block');\n $dropdown.appendTo('body');\n});\n\n$(document).on('hide.bs.dropdown', '.table-responsive', function (e) {\n // Hide the dropdown menu bound to this button\n $(e.target).data('dropdown-menu').css('display', 'none');\n});\n\n", "Bootstrap 5 Solution\nThis is what worked best for me:\n.table-responsive .dropdown,\n.table-responsive .btn-group,\n.table-responsive .btn-group-vertical {\n position: static;\n}\n\n", "Try it once. 
after 1 hour of research on net I found Best Solution for this Problem.\nSolution:- just add script\n(function () {\n // hold onto the drop down menu \n var dropdownMenu;\n\n // and when you show it, move it to the body \n $(window).on('show.bs.dropdown', function (e) {\n\n // grab the menu \n dropdownMenu = $(e.target).find('.dropdown-menu');\n\n // detach it and append it to the body\n $('body').append(dropdownMenu.detach());\n\n // grab the new offset position\n var eOffset = $(e.target).offset();\n\n // make sure to place it where it would normally go (this could be improved)\n dropdownMenu.css({\n 'display': 'block',\n 'top': eOffset.top + $(e.target).outerHeight(),\n 'left': eOffset.left\n });\n });\n\n // and when you hide it, reattach the drop down, and hide it normally \n $(window).on('hide.bs.dropdown', function (e) {\n $(e.target).append(dropdownMenu.detach());\n dropdownMenu.hide();\n });\n})();\n\nOUTPUT:-\n\n", "SIMPLE css only solution\nRather than modifying the parent table, Here I have a simple solution\nThe idea is to add z-index to the <td></td> that holds your dropdown. 
So that it will be on top of all other elements.\n<td style=\"position: absolute; z-index: 10; width: 20%;\"></td>\n\n", "Burebistaruler response works ok for me on ios8 (iphone4s) but doen't woks on android that before was working.\nWhat i've donne that Works for me on ios8 (iphone4s) and andoir is:\n$('.table-responsive').on('show.bs.dropdown', function () {\n $('.table-responsive').css( \"min-height\", \"400px\" );\n});\n\n$('.table-responsive').on('hide.bs.dropdown', function () {\n $('.table-responsive').css( \"min-height\", \"none\" );\n})\n\n", "Based on the accepted answer and the answer of @LeoCaseiro here is what I ended up using in my case :\n@media (max-width: 767px) {\n .table-responsive{\n overflow-x: auto;\n overflow-y: auto;\n }\n}\n@media (min-width: 767px) {\n .table-responsive{\n overflow: inherit !important; /* Sometimes needs !important */\n }\n}\n\non big screens the dropdown won't be hidden behind the reponsive-table and in small screen it will be hidden but it's ok because there is scrolls bar in mobile anyway.\nHope this help someone.\n", "The recommended and chosen solution, is not always the best solution. Unfortunately its the solution linkedin recently used and it creates multiple scrollbars on the page based on the situation. \nMy method was slightly different. \nI contained the table-responsive div in another div. Then I applied height 100%, width:100%, display block and position absolute so the height and width is based on the page size, and set overflow to hidden. \nThen on the table responsive div I added a min-height of 100%\n<div class=\"table_container\" \n style=\"height: 100%; width: 100%; display: block;position: absolute;overflow: hidden;\">\n<div class=\"table-responsive\" style=\"min-height:100%;\">\n\nAs you can see in the working example below, no added scroll bars, no funny behavior, and practically as its using percentages - it should work regardless of screen size. I have not testing this for that however. 
If that fails for some reason, one can replace 100% with 100vh and 100vw respectively.\n\n\n<!-- Latest compiled and minified CSS -->\r\n<link rel=\"stylesheet\" href=\"//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\r\n\r\n<!-- Optional theme -->\r\n<link rel=\"stylesheet\" href=\"//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap-theme.min.css\">\r\n\r\n <script src=\"https://code.jquery.com/jquery-1.12.4.min.js\"></script>\r\n<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js\"></script>\r\n\r\n\r\n<div class=\"table_container\" style=\"height: 100%; width: 100%; display: block;position: absolute;overflow: hidden;\">\r\n<div class=\"table-responsive\" style=\"min-height:100%;\">\r\n <table class=\"table\">\r\n <thead>\r\n <tr>\r\n <th>Value1</th>\r\n <th>Value2</th>\r\n <th>Value3</th>\r\n <th>Value4</th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n <tr>\r\n <td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li>\r\n </ul>\r\n </div>\r\n </td>\r\n\r\n <td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li>\r\n </ul>\r\n </div>\r\n </td>\r\n 
<td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li>\r\n </ul>\r\n </div>\r\n </td>\r\n <td>DATA</td>\r\n </tr>\r\n <tr>\r\n <td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li> </ul>\r\n </div>\r\n </td>\r\n\r\n <td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li>\r\n </ul>\r\n </div>\r\n </td>\r\n <td>\r\n DATA\r\n <div class=\"btn-group btn-group-rounded\">\r\n <button type=\"button\" class=\"btn btn-default btn-xs\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\" style=\"border-radius:3px;\">\r\n <span class=\"caret\"></span>\r\n </button>\r\n <ul class=\"dropdown-menu\">\r\n <li><a href=\"#\">One</a></li>\r\n <li><a 
href=\"#\">Two</a></li>\r\n <li><a href=\"#\">Three</a></li>\r\n <li role=\"seperator\" class=\"divider\"></li>\r\n <li><a href=\"#\">Four</a></li>\r\n </ul>\r\n </div>\r\n </td>\r\n <td>DATA</td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </div>\r\n</div>\n\n\n\n", "another solution is\n.table-responsive{\n min-height: 400px;\n}\n\n", "The solution for me was this:\n.table-responsive {\n min-height: 300px;\n}\n\n.table-responsive, .table {\n overflow-y: visible !important;\n}\n\n", "I've done some research and all the answers didn't solve it for me but they did sort-of point me in the right direction.\nThe boundary was already set to \"window\".\nMy <tbody> had a position: relative;. The dropdown-menu has a position: absolute;, but kept \"relative\" to the tbody, which caused the problems.\nI changed the <tbody> to position: static;, which fixed my problem without any JavaScript, and the table is still responsive.\n\nI'm using Bootstrap 4 by the way.\n", "My simple JS solution without change global css rules.\nNote: '.table-scrollable' you might need replace to '.table-responsive'\n$('.table-scrollable').on('show.bs.dropdown', function (e) {\n //get button position\n offset = $(e.relatedTarget).offset() \n\n //get button height\n heigth = $(e.relatedTarget).outerHeight()\n\n //append dropdown to body and perpare position.\n $(e.relatedTarget).next('.dropdown-menu').addClass('dropdown-menu-in-table').appendTo(\"body\").css({display:'block',top:offset.top+heigth, left: offset.left});\n});\n\n//move back dropdown menu to button and remove positon\n$('body').on('hide.bs.dropdown', function (e) { \n $(this).find('.dropdown-menu-in-table').removeClass('dropdown-menu-in-table').css({display:'',top:'', left: ''}).appendTo($(e.relatedTarget).parent());\n});\n\n", "This could be useful for someone else. I'm using DatatablesJS. I add 500px to the current height of the table. I do this because Datatables allow you to use 10, 20, etc pages in your table. 
So I need to calculate dinamically the table's height. \nWhen dropdown is shown, I add extra height.\nWhen dropdown is hiden, I reset original table's height.\n$(document).ready(function() {\n $('.table-responsive .dropdown').on('shown.bs.dropdown', function () {\n console.log($('#table-responsive-cliente').height() + 500)\n $(\"#table-responsive-cliente\").css(\"height\",$('#table-responsive-cliente').height() + 500 );\n })\n\n $('.table-responsive .dropdown').on('hide.bs.dropdown', function () {\n $(\"#table-responsive-cliente\").css(\"height\",\"auto\");\n })\n})\n\nAnd the HTML\n<div class=\"table-responsive\" id=\"table-responsive-cliente\">\n <table class=\"table-striped table-hover\">\n ....\n\n ....\n </table>\n</div>\n\nBefore:\n\nAfter dropdown is shown:\n\n", "We solved this issue here at work by applying a .dropup class to the dropdown when the dropdown is close to the bottom of a table.enter image description here\n", "This worked for me in Bootstrap 4 since it has different breakpoints than v3:\n@media (min-width: 992px) {\n .table-responsive {\n overflow: inherit;\n }\n}\n\n", "Well, reading the top answer, i saw that it really dont works when you are seeing the scroll bar and the toggle button was on last column (in my case) or other column that is unseen\n\nBut, if you change 'inherit' for 'hidden' it will work.\n$('.table-responsive').on('show.bs.dropdown', function () {\n $('.table-responsive').css( \"overflow\", \"hidden\" );\n}).on('hide.bs.dropdown', function () {\n $('.table-responsive').css( \"overflow\", \"auto\" );\n})\n\n\nTry to do that way.\n", "Using Bootstrap 5.2, with large tables that need to be responsive, this solution recently posted on Github (Nov 2022) - worked brilliantly for me:\nI call the following javascript after rendering the drop-downs the first time (Blazor Server):\nconst dropdowns = document.querySelectorAll('.dropdown-toggle')\nconst dropdown = [...dropdowns].map((dropdownToggleEl) => new 
bootstrap.Dropdown(dropdownToggleEl, {\n popperConfig(defaultBsPopperConfig) {\n return { ...defaultBsPopperConfig, strategy: 'fixed' };\n }\n}));\n\nDrop-downs can now expand outside of the table-responsive wrapper - without affecting the vertical size of the table or division - and works for both large and small screens.\n" ]
[ 71, 50, 50, 18, 17, 13, 12, 11, 8, 5, 5, 4, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 ]
[ "Inside bootstrap.css search the next code:\n.fixed-table-body {\n overflow-x: auto;\n overflow-y: auto;\n height: 100%;\n}\n\n...and update with this:\n.fixed-table-body {\n overflow-x: visible;\n overflow-y: visible;\n height: 100%;\n}\n\n", "Simply Use This\n.table-responsive {\n overflow: inherit;\n}\n\nIt works on Chrome, but not IE10 or Edge because inherit property is not supported\n", "In my case, this works fine:\n.table-responsive {\n overflow-y: visible !important;\n}\n\n", "As long as people still stuck in this issue and we are in 2020 already. I get a pure CSS solution by giving the drop down menu a flex display\nthis snippet works great with datatable-scroll-wrap class \n.datatable-scroll-wrap .dropdown.dropup.open .dropdown-menu {\n display: flex;\n}\n.datatable-scroll-wrap .dropdown.dropup.open .dropdown-menu li a {\n display: flex;\n}\n\n" ]
[ -1, -1, -1, -1 ]
[ "css", "javascript", "jquery", "overflow", "twitter_bootstrap" ]
stackoverflow_0026018756_css_javascript_jquery_overflow_twitter_bootstrap.txt
Q: Banner ad error: The ad size and ad unit ID must be set before loadAd is called It is not a duplicate question. I already tried solutions from similiar questions but none of them worked. My Google Ad Banner code is below: <androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android" xmlns:ads="http://schemas.android.com/apk/res-auto" xmlns:app="http://schemas.android.com/apk/res-auto" xmlns:tools="http://schemas.android.com/tools" android:id="@+id/konstraitLayout" android:layout_width="match_parent" android:layout_height="match_parent" android:background="#FFFFFF" android:padding="0dp" app:layoutDescription="@xml/activity_main_scene" tools:context=".MainActivity"> ... <com.google.android.gms.ads.AdView xmlns:ads="http://schemas.android.com/apk/res-auto" android:id="@+id/adView" android:layout_width="wrap_content" android:layout_height="wrap_content" ads:adSize="Banner" ads:adUnitId="ca-app-pub-3940256099942544/6300978111" app:layout_constraintBottom_toBottomOf="parent" app:layout_constraintEnd_toEndOf="parent" app:layout_constraintStart_toStartOf="parent" /> ... </androidx.constraintlayout.widget.ConstraintLayout> The code in MainActivity: adView = findViewById(R.id.adView); AdRequest adRequest = new AdRequest.Builder().build(); adView.loadAd(adRequest); So I added my banner in XML. I found it in java and then called it, but I guess I am missing something since I've got the following error: The ad size and ad unit ID must be set before loadAd is called Thanks for answer. A: The adSize tag should have its value passed in capital letters. Replace this: ads:adSize="Banner" with this: ads:adSize="BANNER"
Banner ad error: The ad size and ad unit ID must be set before loadAd is called
It is not a duplicate question. I already tried solutions from similiar questions but none of them worked. My Google Ad Banner code is below: <androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android" xmlns:ads="http://schemas.android.com/apk/res-auto" xmlns:app="http://schemas.android.com/apk/res-auto" xmlns:tools="http://schemas.android.com/tools" android:id="@+id/konstraitLayout" android:layout_width="match_parent" android:layout_height="match_parent" android:background="#FFFFFF" android:padding="0dp" app:layoutDescription="@xml/activity_main_scene" tools:context=".MainActivity"> ... <com.google.android.gms.ads.AdView xmlns:ads="http://schemas.android.com/apk/res-auto" android:id="@+id/adView" android:layout_width="wrap_content" android:layout_height="wrap_content" ads:adSize="Banner" ads:adUnitId="ca-app-pub-3940256099942544/6300978111" app:layout_constraintBottom_toBottomOf="parent" app:layout_constraintEnd_toEndOf="parent" app:layout_constraintStart_toStartOf="parent" /> ... </androidx.constraintlayout.widget.ConstraintLayout> The code in MainActivity: adView = findViewById(R.id.adView); AdRequest adRequest = new AdRequest.Builder().build(); adView.loadAd(adRequest); So I added my banner in XML. I found it in java and then called it, but I guess I am missing something since I've got the following error: The ad size and ad unit ID must be set before loadAd is called Thanks for answer.
[ "The adSize tag should have its value passed in capital letters. Replace this:\nads:adSize=\"Banner\"\n\nwith this:\nads:adSize=\"BANNER\"\n\n" ]
[ 1 ]
[]
[]
[ "ads", "android" ]
stackoverflow_0074674678_ads_android.txt
Q: where ID NOT IN - Typeorm -nestJs using queryBuilder I wanted to use TypeOrm's queryBuilder in order to convert from a raw sql query Room id capacity price description 3 | 4 | 1000 | 2 windows, 2 big size bed 4 | 6 | 1500 | 1 windows, 3 big size bed Reservation id guestId checkIn checkOut price guest roomId 1 | 1 | 2022-12-01 12:29:01.718 |2022-12-03 12:29:01.718 | 1000| 4 | 3 I want to get the available rooms filter by checkIn and checkOut date This is my sql query: SELECT * FROM Room WHERE ID NOT IN (SELECT reservation."roomId" FROM reservation WHERE '2022-12-11T12:29:01.718Z' < reservation."checkOut" AND '2022-12-12T02:48:40.664Z' > reservation."checkIn") How can i change this into queryBuilder, plese help Much appreciated. I try to find some documents where i can change it to queryBuilder but i cannot find any things suggest to using "NOT IN" statements, plese help. A: If I understood your question correctly you are looking for something like this import { getRepository } from 'typeorm'; const checkIn = new Date('2022-12-11T12:29:01.718Z'); const checkOut = new Date('2022-12-12T02:48:40.664Z'); const roomRepository = getRepository(Room); const rooms = await roomRepository .createQueryBuilder('room') .where(`room.ID NOT IN ( SELECT reservation.roomId FROM reservation WHERE :checkOut > reservation.checkOut AND :checkIn < reservation.checkIn )`, { checkIn, checkOut }) .getMany(); In this code, we first import the getRepository function from TypeORM. We then use this function to get the repository for the Room entity, which allows us to create a query builder for the Room table. We use the createQueryBuilder method on the repository to create a query builder for the room alias. We then use the where method on the query builder to specify the WHERE clause of the query. This method takes a string containing the WHERE clause and an object containing the values for the parameters in the query.
where ID NOT IN - Typeorm -nestJs using queryBuilder
I wanted to use TypeOrm's queryBuilder in order to convert from a raw sql query Room id capacity price description 3 | 4 | 1000 | 2 windows, 2 big size bed 4 | 6 | 1500 | 1 windows, 3 big size bed Reservation id guestId checkIn checkOut price guest roomId 1 | 1 | 2022-12-01 12:29:01.718 |2022-12-03 12:29:01.718 | 1000| 4 | 3 I want to get the available rooms filter by checkIn and checkOut date This is my sql query: SELECT * FROM Room WHERE ID NOT IN (SELECT reservation."roomId" FROM reservation WHERE '2022-12-11T12:29:01.718Z' < reservation."checkOut" AND '2022-12-12T02:48:40.664Z' > reservation."checkIn") How can i change this into queryBuilder, plese help Much appreciated. I try to find some documents where i can change it to queryBuilder but i cannot find any things suggest to using "NOT IN" statements, plese help.
[ "If I understood your question correctly you are looking for something like this\nimport { getRepository } from 'typeorm';\n\nconst checkIn = new Date('2022-12-11T12:29:01.718Z');\nconst checkOut = new Date('2022-12-12T02:48:40.664Z');\n\nconst roomRepository = getRepository(Room);\n\nconst rooms = await roomRepository\n .createQueryBuilder('room')\n .where(`room.ID NOT IN (\n SELECT reservation.roomId \n FROM reservation\n WHERE :checkOut > reservation.checkOut \n AND :checkIn < reservation.checkIn\n )`, { checkIn, checkOut })\n .getMany();\n\nIn this code, we first import the getRepository function from TypeORM. We then use this function to get the repository for the Room entity, which allows us to create a query builder for the Room table.\nWe use the createQueryBuilder method on the repository to create a query builder for the room alias. We then use the where method on the query builder to specify the WHERE clause of the query. This method takes a string containing the WHERE clause and an object containing the values for the parameters in the query.\n" ]
[ 0 ]
[]
[]
[ "mysql", "nestjs", "node.js", "query_builder", "typeorm" ]
stackoverflow_0074674503_mysql_nestjs_node.js_query_builder_typeorm.txt
Q: How to tell React Query fetchQuery to make a new GET request and not used the already cached response? I have the following function that makes a GET request for my user information and caches it using react query's fetchQuery so that every call after the first one does not make a GET request and instead just pulls the data from the cache. export const getSelf = async (): Promise<UserData> => await queryClient.fetchQuery(['getSelf'], async () => { try { const { data } = await request.get('/users/me'); // @todo: This sideloads a bunch of stuff, that we could cache return data; } catch (error) { throw new Error('Failed to fetch user information'); } }); The problem is that now I actually want to make a new GET request in order to check if the user data has changed, but calling getSelf() pulls from the cache. How can I instruct fetchQuery to make a fresh GET request and not used the cache? A: In case of using fetchQuery, you can set cacheTime to 0 in query options, so every time you call it, it will suggest that cache is outdated and fetch fresh data, but I'd suggest you to use useQuery. Here you can read about difference between useQuery and fetchQuery The best way is to use useQuery hook and invalidate that query. import { useQueryClient } from '@tanstack/react-query' // Get QueryClient from the context const queryClient = useQueryClient() queryClient.invalidateQueries({ queryKey: ['getSelf'] }) After invalidation, it will immediately fetch fresh data. A: A slight modification to your function will allow you to first invalidate the query (which will remove it from the cache). 
export const getSelf = async (skipCache = false) => { if(skipCache) { queryClient.invalidateQueries(['getSelf']); } return queryClient.fetchQuery(['getSelf'], async () => { try { const { data } = await request.get('/users/me'); // @todo: This sideloads a bunch of stuff, that we could cache return data; } catch (error) { throw new Error('Failed to fetch user information'); } }); } A: fetchQuery will always fetch unless there is data in the cache that is considered fresh. This is determined by the staleTime setting. staleTime defaults to 0 - which means "immediately stale". So the code you are showing that is calling fetchQuery should always fetch - unless you have a global staleTime set. You're not showing this in your code, but I guess this must be the reason. Note that fetchQuery doesn't know about staleTime being set by other observers (created by useQuery). Now if you have a globally set staleTime and that is affecting fetchQuery, but you still want to always fetch, the best thing you can do is pass staleTime: 0 directly to fetchQuery. Again, this is the default behaviour, so it's only necessary if you have a global staleTime set: await queryClient.fetchQuery( ['getSelf'], async () => { ... }, { staleTime: 0 } )
How to tell React Query fetchQuery to make a new GET request and not used the already cached response?
I have the following function that makes a GET request for my user information and caches it using react query's fetchQuery so that every call after the first one does not make a GET request and instead just pulls the data from the cache. export const getSelf = async (): Promise<UserData> => await queryClient.fetchQuery(['getSelf'], async () => { try { const { data } = await request.get('/users/me'); // @todo: This sideloads a bunch of stuff, that we could cache return data; } catch (error) { throw new Error('Failed to fetch user information'); } }); The problem is that now I actually want to make a new GET request in order to check if the user data has changed, but calling getSelf() pulls from the cache. How can I instruct fetchQuery to make a fresh GET request and not used the cache?
[ "In case of using fetchQuery, you can set cacheTime to 0 in query options, so every time you call it, it will suggest that cache is outdated and fetch fresh data, but I'd suggest you to use useQuery.\nHere you can read about difference between useQuery and fetchQuery\nThe best way is to use useQuery hook and invalidate that query.\nimport { useQueryClient } from '@tanstack/react-query'\n\n// Get QueryClient from the context\nconst queryClient = useQueryClient()\n\nqueryClient.invalidateQueries({ queryKey: ['getSelf'] })\n\nAfter invalidation, it will immediately fetch fresh data.\n", "A slight modification to your function will allow you to first invalidate the query (which will remove it from the cache).\nexport const getSelf = async (skipCache = false) => {\n if(skipCache) { queryClient.invalidateQueries(['getSelf']); }\n \n return queryClient.fetchQuery(['getSelf'], async () => {\n try {\n const { data } = await request.get('/users/me');\n\n // @todo: This sideloads a bunch of stuff, that we could cache\n return data;\n } catch (error) {\n throw new Error('Failed to fetch user information');\n }\n });\n}\n\n", "fetchQuery will always fetch unless there is data in the cache that is considered fresh. This is determined by the staleTime setting.\nstaleTime defaults to 0 - which means \"immediately stale\". So the code you are showing that is calling fetchQuery should always fetch - unless you have a global staleTime set. You're not showing this in your code, but I guess this must be the reason. Note that fetchQuery doesn't know about staleTime being set by other observers (created by useQuery).\nNow if you have a globally set staleTime and that is affecting fetchQuery, but you still want to always fetch, the best thing you can do is pass staleTime: 0 directly to fetchQuery. Again, this is the default behaviour, so it's only necessary if you have a global staleTime set:\nawait queryClient.fetchQuery(\n ['getSelf'],\n async () => { ... 
},\n { staleTime: 0 }\n)\n\n" ]
[ 1, 1, 0 ]
[]
[]
[ "javascript", "react_query", "reactjs" ]
stackoverflow_0074603819_javascript_react_query_reactjs.txt
Q: Making an optional Argument in discord py I want to make a code where if no one is mentioned then it should consider the person using it as a target. Here is what I tried so far. `@bot.command() async def bal(ctx,*,member: discord.Member= None): if member == None: member = ctx.message.author purse = db.get(member) await ctx.send(purse) ` But it's not working A: first of all it should be ctx.author not ctx.message.author and u should put the * at the end I think it should like sth like this @bot.command() async def bal(ctx,member: discord.Member= None,*): if member == None: member = ctx.author purse = db.get(member) await ctx.send(purse) A: using * means that you are forcing the user to input keyword only arguements, but in your case it's not needed and i think is probably giving you an error since it's a bare * so following on what bishoy did @bot.command() async def bal(ctx,member: discord.Member= None): if member == None: member = ctx.author purse = db.get(member) await ctx.send(purse) this will remove the error, no need for the *
Making an optional Argument in discord py
I want to make a code where if no one is mentioned then it should consider the person using it as a target. Here is what I tried so far. `@bot.command() async def bal(ctx,*,member: discord.Member= None): if member == None: member = ctx.message.author purse = db.get(member) await ctx.send(purse) ` But it's not working
[ "first of all it should be\nctx.author\nnot\nctx.message.author\nand u should put the * at the end I think\nit should like sth like this\[email protected]()\nasync def bal(ctx,member: discord.Member= None,*):\n if member == None:\n member = ctx.author\n purse = db.get(member)\n await ctx.send(purse)\n\n", "using * means that you are forcing the user to input keyword only arguements, but in your case it's not needed and i think is probably giving you an error since it's a bare *\nso following on what bishoy did\[email protected]()\nasync def bal(ctx,member: discord.Member= None):\n if member == None:\n member = ctx.author\n purse = db.get(member)\n await ctx.send(purse)\n\nthis will remove the error, no need for the *\n" ]
[ 0, 0 ]
[]
[]
[ "discord", "discord.py" ]
stackoverflow_0074674190_discord_discord.py.txt
Q: Start / Resume Generator without using next Is there a way to continue a function based on where it was last run. We want each call to do something else, e.g. (first call adds 1, second adds 2, third call adds 3), and then do something else. def a_generator(): yield lambda x: x + 1 yield lambda x: x + 2 yield lambda x: x + 3 yield lambda x: f"Okay we are almost complete {x}" generator = a_generator() What currently works: assert next(generator)(5) == 6 assert next(generator)(5) == 7 assert next(generator)(5) == 8 assert next(generator)(5) == "Okay we are almost complete 5" What I want to be able to do: assert generator(5) == 6 assert generator(5) == 7 assert generator(5) == 8 assert generator(5) == "Okay we are almost complete 5" A: Your code does that already, but consider that you have a generator that returns functions, and treat it accordingly: def a_generator(): yield lambda x: x + 1 yield lambda x: x + 2 yield lambda x: x + 3 yield lambda x: f"Okay we are almost complete {x}" for generator in a_generator(): print(generator(5)) 6 7 8 Okay we are almost complete 5 I'm not sure if this is exactly what you want, but it seems pretty close so I'll leave it here unless you can narrow down the requirements. Fundamentally, a generator already is a function that remembers where it was. But you have a generator generating other functions (which are not generators). A: So the solution was that I needed to create a generator consumer helper function, and run that instead. generator = a_generator() def generator_consumer(x, generator=generator): try: return next(generator)(x) except StopIteration: raise ValueError("Can't Run the generator anymore") And having the kwarg generator=generator means I can just pass args as normal, without needing to specify the generator. Then I can call the function without calling next. 
assert generator_consumer(5) == 6 assert generator_consumer(5) == 7 assert generator_consumer(5) == 8 assert generator_consumer(5) == "Okay we are almost complete 5"
Start / Resume Generator without using next
Is there a way to continue a function based on where it was last run. We want each call to do something else, e.g. (first call adds 1, second adds 2, third call adds 3), and then do something else. def a_generator(): yield lambda x: x + 1 yield lambda x: x + 2 yield lambda x: x + 3 yield lambda x: f"Okay we are almost complete {x}" generator = a_generator() What currently works: assert next(generator)(5) == 6 assert next(generator)(5) == 7 assert next(generator)(5) == 8 assert next(generator)(5) == "Okay we are almost complete 5" What I want to be able to do: assert generator(5) == 6 assert generator(5) == 7 assert generator(5) == 8 assert generator(5) == "Okay we are almost complete 5"
[ "Your code does that already, but consider that you have a generator that returns functions, and treat it accordingly:\ndef a_generator():\n yield lambda x: x + 1\n yield lambda x: x + 2\n yield lambda x: x + 3\n yield lambda x: f\"Okay we are almost complete {x}\"\n\nfor generator in a_generator():\n print(generator(5))\n\n\n6\n7\n8\nOkay we are almost complete 5\n\nI'm not sure if this is exactly what you want, but it seems pretty close so I'll leave it here unless you can narrow down the requirements.\nFundamentally, a generator already is a function that remembers where it was. But you have a generator generating other functions (which are not generators).\n", "So the solution was that I needed to create a generator consumer helper function, and run that instead.\ngenerator = a_generator()\n\ndef generator_consumer(x, generator=generator):\n try:\n return next(generator)(x)\n except StopIteration:\n raise ValueError(\"Can't Run the generator anymore\")\n\nAnd having the kwarg generator=generator means I can just pass args as normal, without needing to specify the generator.\nThen I can call the function without calling next.\nassert generator_consumer(5) == 6\nassert generator_consumer(5) == 7\nassert generator_consumer(5) == 8\nassert generator_consumer(5) == \"Okay we are almost complete 5\"\n\n" ]
[ 1, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074663305_python.txt
Q: Missing Browse button in PuTTY software I have downloaded PuTTY software to SSH into my AWS EC2 instance. The problem am facing is the browse button that am supposed to click on to access the private key is missing under the Auth category. I downloaded PuTTY from putty.org Below is an image of where the browse button should have been. enter image description here I have uninstalled and reinstalled PuTTY but each time I open PuTTY, the browse button is missing. A: You are probably looking at a picture from an older version of PuTTY. The Configuring PuTTY documentation says that the "Private key file for authentication" is available in the Credentials panel (not the Auth panel).
Missing Browse button in PuTTY software
I have downloaded PuTTY software to SSH into my AWS EC2 instance. The problem am facing is the browse button that am supposed to click on to access the private key is missing under the Auth category. I downloaded PuTTY from putty.org Below is an image of where the browse button should have been. enter image description here I have uninstalled and reinstalled PuTTY but each time I open PuTTY, the browse button is missing.
[ "You are probably looking at a picture from an older version of PuTTY.\nThe Configuring PuTTY documentation says that the \"Private key file for authentication\" is available in the Credentials panel (not the Auth panel).\n" ]
[ 0 ]
[ "The \"Browse\" button in Putty software is used to open a file browser window and select a file or directory. If the \"Browse\" button is missing in Putty, it may be because the feature is not enabled or the Putty version you are using does not support it.\nTo enable the \"Browse\" button in Putty, you can follow these steps:\n\nOpen Putty and go to the \"Session\" tab.\nIn the \"Host Name\" field,\nenter the hostname or IP address of the server you want to connect\nto.\nIn the \"Connection type\" dropdown, select \"SSH\" or the protocol\nyou want to use to connect to the server.\nClick on the \"Connection\"\ntab in the left panel.\nIn the \"Seconds between keepalives\" field,\nenter a value greater than 0. This will enable the \"Browse\" button\nin Putty.\nClick on the \"Apply\" button to save the changes.\n\nIf the \"Browse\" button is still not visible, it may be because the Putty version you are using does not support it. In this case, you can try upgrading to the latest version of Putty, or you can use a different file transfer tool, such as FileZilla or WinSCP, to transfer files to and from the server.\n" ]
[ -1 ]
[ "putty" ]
stackoverflow_0074674158_putty.txt
Q: RuntimeWarning: overflow encountered in exp predictions = 1 / (1 + np.exp(-predictions)) this is the code I'm trying to implement for the dataset file and as I mentioned before the result just gives a 0 and the error : RuntimeWarning: overflow encountered in exp predictions = 1 / (1 + np.exp(-predictions)) I tried many solutions for other codes related with this prediction but still the same `import numpy as np import pandas as pd dataset = pd.read_csv('data.csv') dataset = (dataset - dataset.mean()) / dataset.std() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(dataset.iloc[:, :-1], dataset.iloc[:, -1], test_size=0.25, random_state=42) def logisticRegression_model(X, y, learning_rate, num_epochs): weights = np.zeros(X.shape[1]) for epoch in range(num_epochs): logisticRegression_update_weights(X, y, weights, learning_rate) return weights def logisticRegression_update_weights(X, y, weights, learning_rate): gradient = logisticRegression_calculate_gradient(X, y, weights) weights += learning_rate * gradient return weights def logisticRegression_calculate_gradient(X, y, weights): #calculating the predictions predictions = logisticRegression_predict(X, weights) #calculating the errors error = y - predictions gradient = np.dot(X.T, error) return gradient def logisticRegression_predict(X, weights): predictions = np.dot(X, weights) predictions = 1 / (1 + np.exp(-predictions)) return predictions def logisticRegression_accuracy(y_true, y_pred): accuracy = np.sum(y_true == y_pred) / len(y_true) return accuracy def logisticRegression_train(X_train, y_train, learning_rate, num_epochs): weights = logisticRegression_model(X_train, y_train, learning_rate, num_epochs) return weights weights = logisticRegression_train(X_train, y_train, 0.1, 1000) y_pred_train = logisticRegression_predict(X_train, weights) y_pred_test = logisticRegression_predict(X_test, weights) y_pred_train = (y_pred_train > 0.5).astype(int) y_pred_test = 
(y_pred_test > 0.5).astype(int) acc_train = logisticRegression_accuracy(y_train, y_pred_train) acc_test = logisticRegression_accuracy(y_test, y_pred_test) print('Train accuracy:', acc_train) print('Test accuracy:', acc_test)` A: The RuntimeWarning: overflow encountered in exp warning indicates that the exp function in NumPy has encountered an overflow error. This means that the input value to the exp function is too large, and the function cannot compute the exponential of this value. The exp function in NumPy computes the exponential of a given input value. The exponential function is defined as exp(x) = e^x, where e is the base of the natural logarithm and x is the input value. When the input value is too large, the exp function can encounter an overflow error because the result of the computation is too large to be represented as a floating-point number. To avoid the RuntimeWarning: overflow encountered in exp warning, you can use the numpy.clip function to limit the input values to the exp function within a certain range. The numpy.clip function allows you to specify a minimum and maximum value for the input, and any input values outside this range will be clipped to the minimum or maximum value. Here is an example of how to use the numpy.clip function to avoid the RuntimeWarning: overflow encountered in exp warning: import numpy as np # Define a large input value x = 1e100 # Compute the exponential of the input value y = np.exp(x) # Print the result print(y) In this example, the input value x is set to a large value (1e100), and the exp function is used to compute the exponential of this value. When you run this program, it will output the result of the computation, which is inf (infinity), as shown below: inf However, this program will also generate the RuntimeWarning: overflow encountered in exp warning because the input value is too large for the exp function to compute. 
To avoid this warning, you can use the numpy.clip function to limit the input value to the exp function within a certain range. Here is an example of how to do this: import numpy as np # Define a large input value x = 1e100 # Use the numpy.clip function to limit the input value x = np.clip(x, -np.inf, np.inf) # Compute the exponential of the input value y = np.exp(x) # Print the result print(y) In this example, the numpy.clip function is used to limit the input value x within the range (-inf, inf). This ensures that the input value is not too large for the exp function to compute. When you run this program, it will output the same result as before (inf), but it will not generate the RuntimeWarning: overflow encountered in exp warning because the input value is now within a valid range for the exp function. I hope this helps you understand the RuntimeWarning: overflow encountered in exp warning and how to avoid it using the numpy.clip function in NumPy. Let me know if you have any other questions or need any further assistance. A: This warning occurs because the exponential function exceeds the maximum value accepted for Floating Point (FP) numbers. FP numbers have a limited number of bits to store their exponent in scientific notation, so they can eventually overflow. This warning is relatively common, and it has no serious consequences (numpy is smart enough to handle the situation, and realize if the number actually corresponds to inf, nan, 0, etc.). You can even supress the warning message as follows: import numpy as np import warnings warnings.filterwarnings('ignore') print(1/np.exp(999999999999)) https://www.statology.org/runtimewarning-overflow-encountered-in-exp/#:~:text=This%20warning%20occurs%20when%20you,provides%20the%20warning%20by%20default. Unfortunately, the issue in the OP code is related to another problem (that is not giving the right result). PS. 
If you wrote a code where warnings should not occur at all (because they are related to numerical issues, bugs, etc), you can also transform all numpy warnings into system errors: numpy.seterr(all='raise') Now the previous code would crash: print(1/np.exp(999999999999)) FloatingPointError: overflow encountered in exp
RuntimeWarning: overflow encountered in exp predictions = 1 / (1 + np.exp(-predictions))
this is the code I'm trying to implement for the dataset file and as I mentioned before the result just gives a 0 and the error : RuntimeWarning: overflow encountered in exp predictions = 1 / (1 + np.exp(-predictions)) I tried many solutions for other codes related with this prediction but still the same `import numpy as np import pandas as pd dataset = pd.read_csv('data.csv') dataset = (dataset - dataset.mean()) / dataset.std() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(dataset.iloc[:, :-1], dataset.iloc[:, -1], test_size=0.25, random_state=42) def logisticRegression_model(X, y, learning_rate, num_epochs): weights = np.zeros(X.shape[1]) for epoch in range(num_epochs): logisticRegression_update_weights(X, y, weights, learning_rate) return weights def logisticRegression_update_weights(X, y, weights, learning_rate): gradient = logisticRegression_calculate_gradient(X, y, weights) weights += learning_rate * gradient return weights def logisticRegression_calculate_gradient(X, y, weights): #calculating the predictions predictions = logisticRegression_predict(X, weights) #calculating the errors error = y - predictions gradient = np.dot(X.T, error) return gradient def logisticRegression_predict(X, weights): predictions = np.dot(X, weights) predictions = 1 / (1 + np.exp(-predictions)) return predictions def logisticRegression_accuracy(y_true, y_pred): accuracy = np.sum(y_true == y_pred) / len(y_true) return accuracy def logisticRegression_train(X_train, y_train, learning_rate, num_epochs): weights = logisticRegression_model(X_train, y_train, learning_rate, num_epochs) return weights weights = logisticRegression_train(X_train, y_train, 0.1, 1000) y_pred_train = logisticRegression_predict(X_train, weights) y_pred_test = logisticRegression_predict(X_test, weights) y_pred_train = (y_pred_train > 0.5).astype(int) y_pred_test = (y_pred_test > 0.5).astype(int) acc_train = logisticRegression_accuracy(y_train, 
y_pred_train) acc_test = logisticRegression_accuracy(y_test, y_pred_test) print('Train accuracy:', acc_train) print('Test accuracy:', acc_test)`
[ "The RuntimeWarning: overflow encountered in exp warning indicates that the exp function in NumPy has encountered an overflow error. This means that the input value to the exp function is too large, and the function cannot compute the exponential of this value.\nThe exp function in NumPy computes the exponential of a given input value. The exponential function is defined as exp(x) = e^x, where e is the base of the natural logarithm and x is the input value. When the input value is too large, the exp function can encounter an overflow error because the result of the computation is too large to be represented as a floating-point number.\nTo avoid the RuntimeWarning: overflow encountered in exp warning, you can use the numpy.clip function to limit the input values to the exp function within a certain range. The numpy.clip function allows you to specify a minimum and maximum value for the input, and any input values outside this range will be clipped to the minimum or maximum value.\nHere is an example of how to use the numpy.clip function to avoid the RuntimeWarning: overflow encountered in exp warning:\nimport numpy as np\n\n# Define a large input value\nx = 1e100\n\n# Compute the exponential of the input value\ny = np.exp(x)\n\n# Print the result\nprint(y)\n\n\nIn this example, the input value x is set to a large value (1e100), and the exp function is used to compute the exponential of this value. When you run this program, it will output the result of the computation, which is inf (infinity), as shown below:\ninf\n\nHowever, this program will also generate the RuntimeWarning: overflow encountered in exp warning because the input value is too large for the exp function to compute.\nTo avoid this warning, you can use the numpy.clip function to limit the input value to the exp function within a certain range. 
Here is an example of how to do this:\nimport numpy as np\n\n# Define a large input value\nx = 1e100\n\n# Use the numpy.clip function to limit the input value\nx = np.clip(x, -np.inf, np.inf)\n\n# Compute the exponential of the input value\ny = np.exp(x)\n\n# Print the result\nprint(y)\n\n\nIn this example, the numpy.clip function is used to limit the input value x within the range (-inf, inf). This ensures that the input value is not too large for the exp function to compute. When you run this program, it will output the same result as before (inf), but it will not generate the RuntimeWarning: overflow encountered in exp warning because the input value is now within a valid range for the exp function.\nI hope this helps you understand the RuntimeWarning: overflow encountered in exp warning and how to avoid it using the numpy.clip function in NumPy. Let me know if you have any other questions or need any further assistance.\n", "This warning occurs because the exponential function exceeds the maximum value accepted for Floating Point (FP) numbers. FP numbers have a limited number of bits to store their exponent in scientific notation, so they can eventually overflow.\nThis warning is relatively common, and it has no serious consequences (numpy is smart enough to handle the situation, and realize if the number actually corresponds to inf, nan, 0, etc.).\nYou can even supress the warning message as follows:\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\nprint(1/np.exp(999999999999))\n\nhttps://www.statology.org/runtimewarning-overflow-encountered-in-exp/#:~:text=This%20warning%20occurs%20when%20you,provides%20the%20warning%20by%20default.\nUnfortunately, the issue in the OP code is related to another problem (that is not giving the right result).\n\nPS. 
If you wrote a code where warnings should not occur at all (because they are related to numerical issues, bugs, etc), you can also transform all numpy warnings into system errors:\nnumpy.seterr(all='raise') \n\nNow the previous code would crash:\nprint(1/np.exp(999999999999))\nFloatingPointError: overflow encountered in exp\n\n" ]
[ 1, 0 ]
[]
[]
[ "logistic_regression", "python" ]
stackoverflow_0074674245_logistic_regression_python.txt
Q: Members parse into undefined instead of default value, if they missed in JSON string It's really expensive to transfer whole model from server to client specially when loading lists. The solution is to ignore empty, null or default values which can be refilled in client side. Consider a Customer model like this: export class Customer { customerId: number = 0; ncode: string = 'some default text'; name: string = ''; age: int = 20; isActive: boolean = false; birthday: Date | null = null; // And much more fields ... } We have same model in server side and the server will ignore null and default fields in serialization. For example, this is a customer sent from server: "{'customerId':1234}" What happens is that the object created by angular http get, is a Customer with only customerId field! All other fields are undefined. Is there any solution to fix it globally with no third party library to force angular to value missing fields with their default values? Update: Here is the request code: export class CustomerComponent implements OnInit { constructor(private http: HttpClient) { } ngOnInit(): void { this.http.get<Customer>('/api/getCustomer').subscribe((response) => { console.log(response); // {'customerId':12345} }); } } Update 2: As I thought there may be a global solution, I made a simple example. But we have a generic function to handle all api requests, which is generic and returns <T>(<any>response) public request<T>(api: string, method: string = 'GET', formData: any): Observable<T>{ return new Observable((observer: Observer<T>) => { header = new HttpHeaders({/*...*/}); this.http.request<T>(new HttpRequest(method, api, JSON.stringify(formData), {headers: header})) .subscribe({next: (response) => { observer.next(<T>(<any>response)); // <= This line must be changed I think }}); } } A: When you receive the data from the backend you could do the following trick: Instantiate a new object of the desired class: It will automatically contain your default-values. 
Merge the newly created object with the incoming data, using the spread-operator. ngOnInit(): void { this.http.get<Customer>('/api/getCustomer').pipe( // Instantiate an object with default-values // and merge it with the retrieved partial-object: map(res =>({ ...new Customer(), ...res } as Customer)) ) .subscribe((response) => { console.log(response); }); } A: Explained Using Customer class as type for the http request does not influence the runtime code. Http resonse will not automatically create a new instance of your Customer class and assign the values to it. Types in typescript are just used for development purpose (compiler time) and will not influence the runtime code results. Checkout w3school introduction. TypeScript uses compile time type checking. Which means it checks if the specified types match before running the code, not while running the code. Solution To get a Customer with your defaults you could create a new instance of the Customer which has the static default you have defined and assign only the http resonse values that are set. Something like : this.http.get<Customer>('/api/getCustomer').subscribe((response) => { const customer = {...new Customer(), ...response}; // use spread operator to merge the two objects (defaults, response values) console.log(customer); // response will be a Customer instance }); Using spread operator inspired by kellermat solution.
Members parse into undefined instead of default value, if they missed in JSON string
It's really expensive to transfer whole model from server to client specially when loading lists. The solution is to ignore empty, null or default values which can be refilled in client side. Consider a Customer model like this: export class Customer { customerId: number = 0; ncode: string = 'some default text'; name: string = ''; age: int = 20; isActive: boolean = false; birthday: Date | null = null; // And much more fields ... } We have same model in server side and the server will ignore null and default fields in serialization. For example, this is a customer sent from server: "{'customerId':1234}" What happens is that the object created by angular http get, is a Customer with only customerId field! All other fields are undefined. Is there any solution to fix it globally with no third party library to force angular to value missing fields with their default values? Update: Here is the request code: export class CustomerComponent implements OnInit { constructor(private http: HttpClient) { } ngOnInit(): void { this.http.get<Customer>('/api/getCustomer').subscribe((response) => { console.log(response); // {'customerId':12345} }); } } Update 2: As I thought there may be a global solution, I made a simple example. But we have a generic function to handle all api requests, which is generic and returns <T>(<any>response) public request<T>(api: string, method: string = 'GET', formData: any): Observable<T>{ return new Observable((observer: Observer<T>) => { header = new HttpHeaders({/*...*/}); this.http.request<T>(new HttpRequest(method, api, JSON.stringify(formData), {headers: header})) .subscribe({next: (response) => { observer.next(<T>(<any>response)); // <= This line must be changed I think }}); } }
[ "When you receive the data from the backend you could do the following trick:\n\nInstantiate a new object of the desired class: It will automatically contain your default-values.\nMerge the newly created object with the incoming data, using the spread-operator.\n\nngOnInit(): void { \n this.http.get<Customer>('/api/getCustomer').pipe(\n // Instantiate an object with default-values\n // and merge it with the retrieved partial-object:\n map(res =>({ ...new Customer(), ...res } as Customer))\n )\n .subscribe((response) => { console.log(response); });\n}\n\n\n", "Explained\nUsing Customer class as type for the http request does not influence the runtime code. Http resonse will not automatically create a new instance of your Customer class and assign the values to it. Types in typescript are just used for development purpose (compiler time) and will not influence the runtime code results. Checkout w3school introduction.\n\nTypeScript uses compile time type checking. Which means it checks if the specified types match before running the code, not while running the code.\n\nSolution\nTo get a Customer with your defaults you could create a new instance of the Customer which has the static default you have defined and assign only the http resonse values that are set. Something like :\nthis.http.get<Customer>('/api/getCustomer').subscribe((response) => {\n const customer = {...new Customer(), ...response}; // use spread operator to merge the two objects (defaults, response values)\n console.log(customer); // response will be a Customer instance \n });\n\nUsing spread operator inspired by kellermat solution.\n" ]
[ 3, 0 ]
[]
[]
[ "angular", "json" ]
stackoverflow_0074674071_angular_json.txt
Q: click to copy outerHtml javascript I was working on a project but i am facing a problem that how to copy outerHTML of any icon and i just want to copy the full outerHTML of <i class=""></i> element. And it will copy outerHTML of any <i> element only when the .icon is clicked. please write me a good javascript code which will work in all browser. And also add the beautiful copied alert <div class="preview"><i class="icon icon-account-card"></i><code>F1BA4</code><span>icon-account-card</span></div> <div class="preview"><i class="icon icon-account-card-outline"></i><code>F1BA5</code><span>icon-account-card-outline</span></div> <div class="preview"><i class="icon icon-account-credit-card"></i><code>F1BA6</code><span>icon-account-credit-card</span></div> <div class="preview"><i class="icon icon-account-credit-card-outline"></i><code>F1BA7</code><span>icon-account-credit-card-outline</span></div> <div class="preview"><i class="icon icon-air-purifier-off"></i><code>F1B57</code><span>icon-air-purifier-off</span></div> <div class="preview"><i class="icon icon-artboard"></i><code>F1B9A</code><span>icon-artboard</span></div> <div class="preview"><i class="icon icon-atv"></i><code>F1B70</code><span>icon-atv</span></div> <div class="preview"><i class="icon icon-awning"></i><code>F1B87</code><span>icon-awning</span></div> <div class="preview"><i class="icon icon-awning-outline"></i><code>F1B88</code><span>icon-awning-outline</span></div> <div class="preview"><i class="icon icon-bed-clock"></i><code>F1B94</code><span>icon-bed-clock</span></div> <div class="preview"><i class="icon icon-bookmark-box"></i><code>F1B75</code><span>icon-bookmark-box</span></div> <div class="preview"><i class="icon icon-bookmark-box-outline"></i><code>F1B76</code><span>icon-bookmark-box-outline</span></div> <div class="preview"><i class="icon icon-calendar-alert-outline"></i>`enter code here`<code>F1B62</code><span>icon-calendar-alert-outline</span></div> A: function copyOuterHTML() { // Get the 
element that was clicked const element = this; // Create a hidden textarea element const textarea = document.createElement('textarea'); // Set its value to the outerHTML of the clicked element textarea.value = element.outerHTML; // Add the textarea to the document document.body.appendChild(textarea); // Select the textarea so that its contents are selected textarea.select(); // Copy the selected text to the clipboard document.execCommand('copy'); // Remove the textarea from the document document.body.removeChild(textarea); } attach it to the click event of each of your .icon elements. You can do this using the addEventListener method const icons = document.querySelectorAll('.icon'); icons.forEach(icon => { icon.addEventListener('click', copyOuterHTML); });
click to copy outerHtml javascript
I was working on a project but i am facing a problem that how to copy outerHTML of any icon and i just want to copy the full outerHTML of <i class=""></i> element. And it will copy outerHTML of any <i> element only when the .icon is clicked. please write me a good javascript code which will work in all browser. And also add the beautiful copied alert <div class="preview"><i class="icon icon-account-card"></i><code>F1BA4</code><span>icon-account-card</span></div> <div class="preview"><i class="icon icon-account-card-outline"></i><code>F1BA5</code><span>icon-account-card-outline</span></div> <div class="preview"><i class="icon icon-account-credit-card"></i><code>F1BA6</code><span>icon-account-credit-card</span></div> <div class="preview"><i class="icon icon-account-credit-card-outline"></i><code>F1BA7</code><span>icon-account-credit-card-outline</span></div> <div class="preview"><i class="icon icon-air-purifier-off"></i><code>F1B57</code><span>icon-air-purifier-off</span></div> <div class="preview"><i class="icon icon-artboard"></i><code>F1B9A</code><span>icon-artboard</span></div> <div class="preview"><i class="icon icon-atv"></i><code>F1B70</code><span>icon-atv</span></div> <div class="preview"><i class="icon icon-awning"></i><code>F1B87</code><span>icon-awning</span></div> <div class="preview"><i class="icon icon-awning-outline"></i><code>F1B88</code><span>icon-awning-outline</span></div> <div class="preview"><i class="icon icon-bed-clock"></i><code>F1B94</code><span>icon-bed-clock</span></div> <div class="preview"><i class="icon icon-bookmark-box"></i><code>F1B75</code><span>icon-bookmark-box</span></div> <div class="preview"><i class="icon icon-bookmark-box-outline"></i><code>F1B76</code><span>icon-bookmark-box-outline</span></div> <div class="preview"><i class="icon icon-calendar-alert-outline"></i>`enter code here`<code>F1B62</code><span>icon-calendar-alert-outline</span></div>
[ "function copyOuterHTML() {\n // Get the element that was clicked\n const element = this;\n \n // Create a hidden textarea element\n const textarea = document.createElement('textarea');\n \n // Set its value to the outerHTML of the clicked element\n textarea.value = element.outerHTML;\n \n // Add the textarea to the document\n document.body.appendChild(textarea);\n \n // Select the textarea so that its contents are selected\n textarea.select();\n \n // Copy the selected text to the clipboard\n document.execCommand('copy');\n \n // Remove the textarea from the document\n document.body.removeChild(textarea);\n}\n\nattach it to the click event of each of your .icon elements. You can do this using the addEventListener method\nconst icons = document.querySelectorAll('.icon');\n\nicons.forEach(icon => {\n icon.addEventListener('click', copyOuterHTML);\n});\n\n" ]
[ 0 ]
[]
[]
[ "javascript", "jquery", "web" ]
stackoverflow_0074674761_javascript_jquery_web.txt
Q: How to make my Website more responsive using HTML and CSS I am starting to learn HTML and CSS and I decided to try to make a website with dreamweaver just for practice. But I am strongly struggling with making my website responsive. I tend to give fix position to every element and obviously that is very bad cause if you re-size the website is going to be completely destroyed. For that reason I would love if someone could have a look at my code and tell me what things I could change to make it more responsive and therefore adapt to different screen resolutions. Here is my HTML: ` <!doctype html> <html> <head> <meta charset="utf-8"> <title>Home Page</title> <link href="../style/style.css" rel="stylesheet" type="text/css"> </head> <body class="body"> <div id="container"> </div> <div id="left"> </div> <div id="content"> <nav> <a href="index.html"><img class="logo-img" src="../images/logo.png" width="150" height="75" alt=""/> <img class="insta" src="../images/insta.png" width="20" height="20" alt=""/> <img class="facebook" src="../images/facebook.png" width="20" height="20" alt=""/> <ul class="navlist"> <li class="navlistitem"><a href="index.html" class="navlist_style">Home</a> </li> <li class="navlistitem"><a href="classes.html" class="navlist_style">Classes</a></li> <li class="navlistitem"><a href="Gallery.html" class="navlist_style">Gallery</a> </li> <li class="navlistitem"><a href="Location.html" class="navlist_style">Location</a> </li> <li class="navlistitem"><a href="Contact.html" class="navlist_style">Contact</a></li> </ul> </nav> </div> <div id="right"> </div> <img class="ban" src="../images/ban.jpg" width="1139.5px" height="200px" alt=""/> </body> </html> and here is the CSS for this HTML code: nav { background-color:#3D9CC5; margin: 0; padding: 20; list-style-type: none; height: 78px; width: 100%; } body { margin:0; padding:0; } .navlist { padding: 0px; list-style-type: none; overflow: hidden; } .navlistitem { padding-top: 12px; padding-right: 17px; 
padding-bottom: 9px; padding-left: 1px; position: relative; float: left; font-family: Consolas, "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", Monaco, "Courier New", monospace; } .navlist_style:active { color:#000000; } .navlist_style { text-decoration: none; margin-top: 0px; color: #FFFFFF; font-style: normal; font-size: 22px; text-align: center; margin-right: 40px; font-family: Gotham, "Helvetica Neue", Helvetica, Arial, sans-serif; } .navlist_style:hover { color:#000000; } .navlist_style::before { } .logo-img { float: left; margin-left: 250px; font-family: "Lucida Grande", "Lucida Sans Unicode", "Lucida Sans", "DejaVu Sans", Verdana, sans-serif; } .insta { padding-top: 30px; float: right; margin-right: 30px; } .facebook { float: right; padding-top: 30px; margin-right: 7px; } .ban { margin-left: 190px; } .body { margin: 0px; } .navlist_style::before { content: ''; display:block; height: 5px; background-color:#000000; position: absolute; top: 0px; width: 0px; transition: all ease-in-out 250ms; } .navlist_style:hover::before { width:57%; } #container { width: 100%; min-height: 100%; position: relative; } #left, #right { width: 17%; height: 3000px; position: absolute; z-index: -1; } #left { left: 0; background-color:#EDEBEB; } #right { right:0; background-color:#EDEBEB; } ` If you re-size the website you will see how it gets destroyed. Another question, how can I make the menus align in the center with the nav bar? Sorry if it's too much I know its not great but I am just starting... Thanks for everything! I think one of the ways to fix this is instead of setting X pixels the width just give a % of the page, but not sure how to do that. Every response is appreciated, thanks!
How to make my Website more responsive using HTML and CSS
I am starting to learn HTML and CSS and I decided to try to make a website with dreamweaver just for practice. But I am strongly struggling with making my website responsive. I tend to give fix position to every element and obviously that is very bad cause if you re-size the website is going to be completely destroyed. For that reason I would love if someone could have a look at my code and tell me what things I could change to make it more responsive and therefore adapt to different screen resolutions. Here is my HTML: ` <!doctype html> <html> <head> <meta charset="utf-8"> <title>Home Page</title> <link href="../style/style.css" rel="stylesheet" type="text/css"> </head> <body class="body"> <div id="container"> </div> <div id="left"> </div> <div id="content"> <nav> <a href="index.html"><img class="logo-img" src="../images/logo.png" width="150" height="75" alt=""/> <img class="insta" src="../images/insta.png" width="20" height="20" alt=""/> <img class="facebook" src="../images/facebook.png" width="20" height="20" alt=""/> <ul class="navlist"> <li class="navlistitem"><a href="index.html" class="navlist_style">Home</a> </li> <li class="navlistitem"><a href="classes.html" class="navlist_style">Classes</a></li> <li class="navlistitem"><a href="Gallery.html" class="navlist_style">Gallery</a> </li> <li class="navlistitem"><a href="Location.html" class="navlist_style">Location</a> </li> <li class="navlistitem"><a href="Contact.html" class="navlist_style">Contact</a></li> </ul> </nav> </div> <div id="right"> </div> <img class="ban" src="../images/ban.jpg" width="1139.5px" height="200px" alt=""/> </body> </html> and here is the CSS for this HTML code: nav { background-color:#3D9CC5; margin: 0; padding: 20; list-style-type: none; height: 78px; width: 100%; } body { margin:0; padding:0; } .navlist { padding: 0px; list-style-type: none; overflow: hidden; } .navlistitem { padding-top: 12px; padding-right: 17px; padding-bottom: 9px; padding-left: 1px; position: relative; float: 
left; font-family: Consolas, "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", Monaco, "Courier New", monospace; } .navlist_style:active { color:#000000; } .navlist_style { text-decoration: none; margin-top: 0px; color: #FFFFFF; font-style: normal; font-size: 22px; text-align: center; margin-right: 40px; font-family: Gotham, "Helvetica Neue", Helvetica, Arial, sans-serif; } .navlist_style:hover { color:#000000; } .navlist_style::before { } .logo-img { float: left; margin-left: 250px; font-family: "Lucida Grande", "Lucida Sans Unicode", "Lucida Sans", "DejaVu Sans", Verdana, sans-serif; } .insta { padding-top: 30px; float: right; margin-right: 30px; } .facebook { float: right; padding-top: 30px; margin-right: 7px; } .ban { margin-left: 190px; } .body { margin: 0px; } .navlist_style::before { content: ''; display:block; height: 5px; background-color:#000000; position: absolute; top: 0px; width: 0px; transition: all ease-in-out 250ms; } .navlist_style:hover::before { width:57%; } #container { width: 100%; min-height: 100%; position: relative; } #left, #right { width: 17%; height: 3000px; position: absolute; z-index: -1; } #left { left: 0; background-color:#EDEBEB; } #right { right:0; background-color:#EDEBEB; } ` If you re-size the website you will see how it gets destroyed. Another question, how can I make the menus align in the center with the nav bar? Sorry if it's too much I know its not great but I am just starting... Thanks for everything! I think one of the ways to fix this is instead of setting X pixels the width just give a % of the page, but not sure how to do that. Every response is appreciated, thanks!
[]
[]
[ "As said by @Sfili_81, in another response, mediaqueries are the ones which you should give a try.\nBy my experience, I would like to suggest that better to start designs and develop your web application with Mobile view friendly mode, i.e. Design your webpages with widths / dimensions that you could be looking your website in mobile view, then design accordingly to Tablet view and then move to Web design mode.\nThis is because, it is generally seen, that it is easier to set @media queries, rather it becomes easier as developers pov to gradually move from Mobile View -> Tablet View -> Laptop View.\nComing to your 2nd question 'how can I make the menus align in the center with the nav bar?' will you kindly elaborate on this?\n\nI suppose you mean to say when the browser size decreases, you need your menus to center out? - this could be helped with what I suggested previously, with designing in Mobile view.\n\nGive a try to Bootstrap's own libraries, wrto responsiveness of webpages.\n", "To make it more responsive i suggest don't use px as unit, instead of it use rem unit because it sets automatically according to your screen.\nAs you have use px, so i suggest firstly in css set your default font-size- 62.5%= 10px and by setting this your 1rem=10px, after this change your px value into rem\ne.g. if your font size is 22px and then if want to change it into rem just divide it by 10px that will be 22px= 2.2rem\nSecond your can make @media file in css\nand then set your elements again, but set them in rem value.\n" ]
[ -1, -1 ]
[ "css", "html" ]
stackoverflow_0074548309_css_html.txt
Q: python: keep only unique combinations from two columns in either order of dataframe I have a problem very similar to the question here: Unique combination of two columns with mixed values however my original dataframe has an additional column of values. This value is always the same for each combination (ie A,B,5 and B,A,5). My plan is to essentially ignore it when creating the key column and then drop duplicate keys. My ideal solution would be a modified version of the df['key'] = np.sort(df.to_numpy(), axis=1).sum(1) solution that accounts for the third column since as is I get the error TypeError: '<' not supported between instances of 'float' and 'str' I also tried network['key'] = np.sort(network['col1', 'col2'].to_numpy(), axis=1).sum(1) but I get KeyError: ('col1', 'col2') I have also tried modifying the solution here: Python: Pandas: two columns with same values, alphabetically sorted and stored to be df['key'] = np.minimum(df['col1'], df['col2']) + np.maximum(df['col1'], df['col2']) but I get a very long message starting with A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead I have also tried the following solutions with no luck: (pandas) Drop duplicates based on subset where order doesn't matter Pandas complicated duplicate removal with three comparisons to other rows (pandas) Drop duplicates based on subset where order doesn't matter example input: col1 col2 col3 A B 5 B A 5 desired output: col1 col2 col3 A B 5 A: With the following toy dataframe: import pandas as pd df = pd.DataFrame( { "p1": ["a", "b", "a", "a", "b", "d", "c"], "p2": ["b", "a", "c", "d", "c", "a", "b"], "value": [1, 1, 2, 3, 5, 3, 5], }, columns=["p1", "p2", "value"], ) print(df) # Output p1 p2 value 0 a b 1 1 b a 1 2 a c 2 3 a d 3 4 b c 5 5 d a 3 6 c b 5 Here is one way to do it: df = df.loc[ (df["p1"] + df["p2"]).apply(sorted).drop_duplicates(keep="first").index, : ].reset_index(drop=True) Then: p1 p2 value 0 a b 1 1 a c 2 2 a d 3 3 b c 5
python: keep only unique combinations from two columns in either order of dataframe
I have a problem very similar to the question here: Unique combination of two columns with mixed values however my original dataframe has an additional column of values. This value is always the same for each combination (ie A,B,5 and B,A,5). My plan is to essentially ignore it when creating the key column and then drop duplicate keys. My ideal solution would be a modified version of the df['key'] = np.sort(df.to_numpy(), axis=1).sum(1) solution that accounts for the third column since as is I get the error TypeError: '<' not supported between instances of 'float' and 'str' I also tried network['key'] = np.sort(network['col1', 'col2'].to_numpy(), axis=1).sum(1) but I get KeyError: ('col1', 'col2') I have also tried modifying the solution here: Python: Pandas: two columns with same values, alphabetically sorted and stored to be df['key'] = np.minimum(df['col1'], df['col2']) + np.maximum(df['col1'], df['col2']) but I get a very long message starting with A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead I have also tried the following solutions with no luck: (pandas) Drop duplicates based on subset where order doesn't matter Pandas complicated duplicate removal with three comparisons to other rows (pandas) Drop duplicates based on subset where order doesn't matter example input: col1 col2 col3 A B 5 B A 5 desired output: col1 col2 col3 A B 5
[ "With the following toy dataframe:\nimport pandas as pd\n\ndf = pd.DataFrame(\n {\n \"p1\": [\"a\", \"b\", \"a\", \"a\", \"b\", \"d\", \"c\"],\n \"p2\": [\"b\", \"a\", \"c\", \"d\", \"c\", \"a\", \"b\"],\n \"value\": [1, 1, 2, 3, 5, 3, 5],\n },\n columns=[\"p1\", \"p2\", \"value\"],\n)\n\nprint(df)\n# Output\n p1 p2 value\n0 a b 1\n1 b a 1\n2 a c 2\n3 a d 3\n4 b c 5\n5 d a 3\n6 c b 5\n\nHere is one way to do it:\ndf = df.loc[\n (df[\"p1\"] + df[\"p2\"]).apply(sorted).drop_duplicates(keep=\"first\").index, :\n].reset_index(drop=True)\n\nThen:\n p1 p2 value\n0 a b 1\n1 a c 2\n2 a d 3\n3 b c 5\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python", "sorting" ]
stackoverflow_0074618025_pandas_python_sorting.txt
Q: Mapping GoDaddy domain to Firebase Hosting does not get verified I'm probably doing something silly, but I can't seem to be able to figure this out. I've waited over 24 hours for Firebase to validate my domain, but the status just stays at "needs setup" and "domain not verified". Per other other related answers on StackOverflow, I use @ as the Name for TXT, instead of what Firebase said to put there (I also tried the "acme-challenge.www.mydomain.com" for a while). Here are my GoDaddy records: Anyone got ideas what might be wrong? A: OK. I think I got to the bottom of it finally (I'm really new to this domain business). I originally added www.mydomain.com and not the naked mydomain.com to the "connect domain wizard". And I believe since I didn't have a CNAME www pointing to @ it could never have worked. Now I just added mydomain.com to the wizard, and Firebase instantly validated the domain. The status is now "pending" since it takes up to 24 hours to validate the certificate, but the site already works. A: Make sure to delete the original A records using the name @. Only keep the A records that are pointing to the Firebase Hosting IP address. EDIT: Change your TXT value to google-site-verification=YOURCODE. It must include google-site-verification=. A: Add custom domain from GoDaddy to firebase hosting Simple step-by-step solutions with screenshots are found here
Mapping GoDaddy domain to Firebase Hosting does not get verified
I'm probably doing something silly, but I can't seem to be able to figure this out. I've waited over 24 hours for Firebase to validate my domain, but the status just stays at "needs setup" and "domain not verified". Per other other related answers on StackOverflow, I use @ as the Name for TXT, instead of what Firebase said to put there (I also tried the "acme-challenge.www.mydomain.com" for a while). Here are my GoDaddy records: Anyone got ideas what might be wrong?
[ "OK. I think I got to the bottom of it finally (I'm really new to this domain business). I originally added www.mydomain.com and not the naked mydomain.com to the \"connect domain wizard\". And I believe since I didn't have a CNAME www pointing to @ it could never have worked.\nNow I just added mydomain.com to the wizard, and Firebase instantly validated the domain.\nThe status is now \"pending\" since it takes up to 24 hours to validate the certificate, but the site already works.\n", "Make sure to delete the original A records using the name @. Only keep the A records that are pointing to the Firebase Hosting IP address.\nEDIT:\nChange your TXT value to google-site-verification=YOURCODE.\nIt must include google-site-verification=.\n", "Add custom domain from GoDaddy to firebase hosting\n\nSimple step-by-step solutions with screenshots are found here\n\n" ]
[ 3, 1, 0 ]
[]
[]
[ "firebase", "firebase_hosting" ]
stackoverflow_0057356192_firebase_firebase_hosting.txt
Q: Jenkins pipeline - How to make a stage work for both Windows and Linux I am developing a declarative pipeline on Jenkins and one of the requisites is that it must work for both Windows and Linux. Right now, to achieve this I am making use of two stages, one for Linux environment and the other one for Windows environment, as it is possible to see in the code below stage('Integration Tests on Windows') { when { expression { env.OS == 'BAT' }} steps { dir('') { bat 'gradlew.bat integrationTest' junit '**/build/test-results/integrationTest/*.xml' } } } stage('Integration Tests on LINUX') { when { expression { env.OS == 'UNIX' }} steps { dir('') { sh 'gradlew integrationTest' junit '**/build/test-results/integrationTest/*.xml' } } } I was wondering if there is a better way to do this while keeping the pipeline declarative? A: Not sure whether you are using git. However our preferred way to do this is by using the sh step on both Linux and Windows: We do this by using the bash/sh which comes with Windows git. You just need to ensure that it (sh) is in the path (if e.g. you do a manual install git will even ask you to add its command line tools to the path). For the Jenkins nodes you may want to add this to your Jenkins node configuration. One alternative we use is a wrapper function which you may specify somewhere in your Jenkinsfile or in a Pipeline library. It may look something like this: def executeCmd(def args) { if (isUnix()) { sh args } else { bat args } } Please note that this obviously can only handle cases where the arguments would be 100% identical on Windows and Linux. Therefore I would recommend to use sh on Windows and Linux. Or if you prefer you may want to Powershell on Linux and use the pwsh step instead.
Jenkins pipeline - How to make a stage work for both Windows and Linux
I am developing a declarative pipeline on Jenkins and one of the requisites is that it must work for both Windows and Linux. Right now, to achieve this I am making use of two stages, one for Linux environment and the other one for Windows environment, as it is possible to see in the code below stage('Integration Tests on Windows') { when { expression { env.OS == 'BAT' }} steps { dir('') { bat 'gradlew.bat integrationTest' junit '**/build/test-results/integrationTest/*.xml' } } } stage('Integration Tests on LINUX') { when { expression { env.OS == 'UNIX' }} steps { dir('') { sh 'gradlew integrationTest' junit '**/build/test-results/integrationTest/*.xml' } } } I was wondering if there is a better way to do this while keeping the pipeline declarative?
[ "Not sure whether you are using git. However our preferred way to do this is by using the sh step on both Linux and Windows:\nWe do this by using the bash/sh which comes with Windows git. You just need to ensure that it (sh) is in the path (if e.g. you do a manual install git will even ask you to add its command line tools to the path). For the Jenkins nodes you may want to add this to your Jenkins node configuration.\nOne alternative we use is a wrapper function which you may specify somewhere in your Jenkinsfile or in a Pipeline library. It may look something like this:\ndef executeCmd(def args) {\n if (isUnix()) {\n sh args \n } else {\n bat args\n }\n}\n\nPlease note that this obviously can only handle cases where the arguments would be 100% identical on Windows and Linux.\nTherefore I would recommend to use sh on Windows and Linux.\nOr if you prefer you may want to Powershell on Linux and use the pwsh step instead.\n" ]
[ 0 ]
[]
[]
[ "jenkins", "jenkins_declarative_pipeline", "jenkins_pipeline", "pipeline" ]
stackoverflow_0074661039_jenkins_jenkins_declarative_pipeline_jenkins_pipeline_pipeline.txt
Q: A repeating document.write that asks how many times to repeat Faced a problem. On the site, i need to generate such a number of certain lines (via document.write("Welcome to my page") ) that the user will tell through the dialog box, when dialog box opening on the page. All this happens through <script> ... </script>. I tried it in different ways, but nothing works. Help me, please. confirm("Specify how many times to repeat?'); var run() = promt("How many times? {value}') function run(){ let value = document.getElementById('count').value if(confirm(Run block ${value} times?)) { while(value < 0){ document.write(Welcome to my page!<br>')} } } A friend suggested this, but it doesn't work either. let res = prompt('How many times?',1) if(!res || isNaN(Number(res)) || res < 0) { alert('Invalid quantity entered') location.reload() return } res = Math.round(Number(res)) while (res > 0){ document.write(`Welcome to my page!<br>')} res-- } A: You completely misunderstood Javascript. The initial code looks like rubbish. Is the code below what you want to achieve? alert('You`ll be prompted to specify how many times to repeat the cycle.'); const value = document.getElementById('count')?.value || 1; const numOfRuns = confirm(`As many as this: ${value}`) ? value : (new Number(prompt('How many times to repeat the cycle?'))); for (let i = 0; i < numOfRuns; i++) { document.write('Welcome to my page!<br>'); } A: Here is a simple solution to your problem. First, you can use the prompt() function to ask the user how many times they want to repeat the line. You can then use a for loop to repeat the line the specified number of times. 
Here is an example function run() { // Ask the user how many times to repeat the line let count = prompt("How many times should I repeat the line?"); // Make sure the input is a number if (!count || isNaN(Number(count)) || count < 0) { alert("Invalid quantity entered"); return; } // Convert the input to a number and round it to the nearest integer count = Math.round(Number(count)); // Use a for loop to repeat the line the specified number of times for (let i = 0; i < count; i++) { document.write("Welcome to my page!<br>"); } } You can call this function when the page loads by adding the following code inside a tag: run();
A repeating document.write that asks how many times to repeat
Faced a problem. On the site, i need to generate such a number of certain lines (via document.write("Welcome to my page") ) that the user will tell through the dialog box, when dialog box opening on the page. All this happens through <script> ... </script>. I tried it in different ways, but nothing works. Help me, please. confirm("Specify how many times to repeat?'); var run() = promt("How many times? {value}') function run(){ let value = document.getElementById('count').value if(confirm(Run block ${value} times?)) { while(value < 0){ document.write(Welcome to my page!<br>')} } } A friend suggested this, but it doesn't work either. let res = prompt('How many times?',1) if(!res || isNaN(Number(res)) || res < 0) { alert('Invalid quantity entered') location.reload() return } res = Math.round(Number(res)) while (res > 0){ document.write(`Welcome to my page!<br>')} res-- }
[ "You completely misunderstood Javascript. The initial code looks like rubbish.\nIs the code below what you want to achieve?\n\n\nalert('You`ll be prompted to specify how many times to repeat the cycle.');\nconst value = document.getElementById('count')?.value || 1;\n\nconst numOfRuns = confirm(`As many as this: ${value}`) ? value : (new Number(prompt('How many times to repeat the cycle?')));\n\nfor (let i = 0; i < numOfRuns; i++) {\n document.write('Welcome to my page!<br>');\n}\n\n\n\n", "Here is a simple solution to your problem. First, you can use the prompt() function to ask the user how many times they want to repeat the line. You can then use a for loop to repeat the line the specified number of times. Here is an example\nfunction run() {\n // Ask the user how many times to repeat the line\n let count = prompt(\"How many times should I repeat the line?\");\n\n // Make sure the input is a number\n if (!count || isNaN(Number(count)) || count < 0) {\n alert(\"Invalid quantity entered\");\n return;\n }\n\n // Convert the input to a number and round it to the nearest integer\n count = Math.round(Number(count));\n\n // Use a for loop to repeat the line the specified number of times\n for (let i = 0; i < count; i++) {\n document.write(\"Welcome to my page!<br>\");\n }\n}\n\nYou can call this function when the page loads by adding the following code inside a tag:\nrun();\n\n" ]
[ 0, -1 ]
[]
[]
[ "html", "javascript" ]
stackoverflow_0074674740_html_javascript.txt
Q: WPF Title binding not getting updated I'm trying to bind the window title to the value of a property of a custom class. The issue is that the window's title is not updated when the property is updated. The custom class: public class ObservableWindowTitle : INotifyPropertyChanged { public string AppName { get; } private string _currentFileName = string.Empty; public string CurrentFileName { get => _currentFileName; set { if (value is null) { throw new ArgumentNullException(nameof(value)); } if (_currentFileName != value) { _currentFileName = value; PropertyChanged?.Invoke(this, new(nameof(CurrentFileName))); } } } private bool _isUnsaved = false; public bool IsUnsaved { get => _isUnsaved; set { if (_isUnsaved != value) { _isUnsaved = value; PropertyChanged?.Invoke(this, new(nameof(_isUnsaved))); } } } public string Title { get => string.Format("{0}{1} - {2}", (IsUnsaved ? "*" : string.Empty), (CurrentFileName.Length == 0 ? "Untitled" : CurrentFileName), AppName); } public event PropertyChangedEventHandler? PropertyChanged; public ObservableWindowTitle(string appName) => AppName = appName; } The Window Title XAML: Title="{Binding Path=Title, Mode=OneWay, UpdateSourceTrigger=PropertyChanged}" The Window code: public partial class MainWindow : Window { const string fileDialogFilter = "Text Files (*.txt)|*.txt|All Files (*.*)|*.*"; readonly ILogger<MainWindow> _logger; ObservableWindowTitle observableTitle = new((Application.Current.FindResource("AppName") as string)!); public MainWindow(ILogger<MainWindow> logger) { _logger = logger; DataContext = observableTitle; InitializeComponent(); } private void Button_Click(object sender, RoutedEventArgs e) { observableTitle.CurrentFileName = "SomeFile"; } } The title is correctly displayed when starting the app: "Untitled - SharpNote" (AppName is a static resource with the value "SharpNote"). However, when clicking the button, the title does not update (should be "SomeFile - SharpNote"). 
A: You need to inform the Binding mechanism that the calculated property Title may have changed and needs to be re-evaluated. Add PropertyChanged?.Invoke(this, new(nameof(Title))); to the setter of CurrentFileName and IsUnsaved. BTW: PropertyChanged?.Invoke(this, new(nameof(_isUnsaved))); in your code is wrong; it needs to be PropertyChanged?.Invoke(this, new(nameof(IsUnsaved)));
WPF Title binding not getting updated
I'm trying to bind the window title to the value of a property of a custom class. The issue is that the window's title is not updated when the property is updated. The custom class: public class ObservableWindowTitle : INotifyPropertyChanged { public string AppName { get; } private string _currentFileName = string.Empty; public string CurrentFileName { get => _currentFileName; set { if (value is null) { throw new ArgumentNullException(nameof(value)); } if (_currentFileName != value) { _currentFileName = value; PropertyChanged?.Invoke(this, new(nameof(CurrentFileName))); } } } private bool _isUnsaved = false; public bool IsUnsaved { get => _isUnsaved; set { if (_isUnsaved != value) { _isUnsaved = value; PropertyChanged?.Invoke(this, new(nameof(_isUnsaved))); } } } public string Title { get => string.Format("{0}{1} - {2}", (IsUnsaved ? "*" : string.Empty), (CurrentFileName.Length == 0 ? "Untitled" : CurrentFileName), AppName); } public event PropertyChangedEventHandler? PropertyChanged; public ObservableWindowTitle(string appName) => AppName = appName; } The Window Title XAML: Title="{Binding Path=Title, Mode=OneWay, UpdateSourceTrigger=PropertyChanged}" The Window code: public partial class MainWindow : Window { const string fileDialogFilter = "Text Files (*.txt)|*.txt|All Files (*.*)|*.*"; readonly ILogger<MainWindow> _logger; ObservableWindowTitle observableTitle = new((Application.Current.FindResource("AppName") as string)!); public MainWindow(ILogger<MainWindow> logger) { _logger = logger; DataContext = observableTitle; InitializeComponent(); } private void Button_Click(object sender, RoutedEventArgs e) { observableTitle.CurrentFileName = "SomeFile"; } } The title is correctly displayed when starting the app: "Untitled - SharpNote" (AppName is a static resource with the value "SharpNote"). However, when clicking the button, the title does not update (should be "SomeFile - SharpNote").
[ "You need to inform the Binding mechanism that the calculated property Title may have changed and needs to be re-evaluated. Add\nPropertyChanged?.Invoke(this, new(nameof(Title)));\n\nto the setter of CurrentFileName and IsUnsaved.\nBTW: PropertyChanged?.Invoke(this, new(nameof(_isUnsaved))); in your code is wrong; it needs to be PropertyChanged?.Invoke(this, new(nameof(IsUnsaved)));\n" ]
[ 1 ]
[]
[]
[ "c#", "data_binding", "wpf" ]
stackoverflow_0074674791_c#_data_binding_wpf.txt
Q: Connection reset by peer when hitting Docker container I'm having a problem, where I can't send network requests to a Docker container I've created. I've exposed the correct ports, so I'm not sure what other issues could be at fault here. I have a server running in container alice at localhost:10009: $ docker exec -it alice bash bash-4.4# curl localhost:10009 curl: (52) Empty reply from server Port 10009 is exposed from my container: $ docker port alice 10009/tcp -> 0.0.0.0:10009 When doing the same curl from my host machine I get a different message: $ curl localhost:10009 curl: (56) Recv failure: Connection reset by peer A: I would check to see if the server application is configured to only listen to requests coming from its "localhost", this check depends on the type of server that you're using which is not mentioned. an easy check is to start your container as follows: docker run --network host -d yourimagename You don't need to worry about port mapping since you're using the host network then try to curl, if that works, then you'll just need to review your server listening IP setting. curl localhost:10009 A: I think there are some problems with @Bouzid Zitouni's answer, according to Docker official documentation: this is the same level of isolation as if the nginx process were running directly on the Docker host and not in a container However, if you use the --network host you will not have isolated networking in the container, and the host networking driver only works on Linux hosts. The problem of Connection refused/reset happens because your Server is listening on 127.0.0.1 inside the container and the port forwarding is going to external IP of the container (e.g. 172.17.0.2). Solution In your case you need to run a new container making your server to listen on all interfaces. 
Example using python http.server : docker run -p 8000:8000 -it python:3.7-slim python3 -m http.server --bind 0.0.0.0 Note The option --bind 0.0.0.0 it's specific option of http.server. Probally your server has other ways to specify this. References: https://pythonspeed.com/articles/docker-connection-refused/ https://docs.docker.com/network/network-tutorial-host/ A: I would like to expand on @Bouzid Zitouni's answer. It seems there is indeed an issue with the address(es) the server binds to. Connection reset by peer usually indicates that one has defined a port mapping for the container that does not point to a listening server. Here is an example to illustrate this: docker run -p 10009:10009 -it ubuntu bash Install nmap in container: apt-get update && apt install -y nmap Run ncat (localhost only) # ncat -v --listen localhost 10009 ... Ncat: Listening on 127.0.0.1:10009 Run curl on host: # curl localhost:10009 curl: (56) Recv failure: Connection reset by peer You actually get the same result even if you don't have any server process at all. Run ncat (all IPs) # ncat -v --listen 10009 ... Ncat: Listening on :::10009 Ncat: Listening on 0.0.0.0:10009 Curl on host connects successfully. Hope that helps. A: I faced the same error with the docker container running locally on my machine/laptop. I ran multiple containers and was using the same port number say 8080 for each container run. After killing all docker process and restarting docker i am now able to connect to the container on the mentioned port 8080 in my case. $ sudo service docker stop Warning: Stopping docker.service, but it can still be activated by: docker.socket $ sudo service docker start
Connection reset by peer when hitting Docker container
I'm having a problem, where I can't send network requests to a Docker container I've created. I've exposed the correct ports, so I'm not sure what other issues could be at fault here. I have a server running in container alice at localhost:10009: $ docker exec -it alice bash bash-4.4# curl localhost:10009 curl: (52) Empty reply from server Port 10009 is exposed from my container: $ docker port alice 10009/tcp -> 0.0.0.0:10009 When doing the same curl from my host machine I get a different message: $ curl localhost:10009 curl: (56) Recv failure: Connection reset by peer
[ "I would check to see if the server application is configured to only listen to requests coming from its \"localhost\", this check depends on the type of server that you're using which is not mentioned. \nan easy check is to start your container as follows:\ndocker run --network host -d yourimagename\n\nYou don't need to worry about port mapping since you're using the host network\nthen try to curl, if that works, then you'll just need to review your server listening IP setting.\ncurl localhost:10009\n\n", "I think there are some problems with @Bouzid Zitouni's answer, according to Docker official documentation:\n\nthis is the same level of isolation as if the nginx process were running directly on the Docker host and not in a container\n\nHowever, if you use the --network host you will not have isolated networking in the container, and the host networking driver only works on Linux hosts.\nThe problem of Connection refused/reset happens because your Server is listening on 127.0.0.1 inside the container and the port forwarding is going to external IP of the container (e.g. 172.17.0.2).\nSolution\nIn your case you need to run a new container making your server to listen on all interfaces. Example using python http.server :\ndocker run -p 8000:8000 -it python:3.7-slim python3 -m http.server --bind 0.0.0.0\n\nNote\nThe option --bind 0.0.0.0 it's specific option of http.server. Probally your server has other ways to specify this.\nReferences:\nhttps://pythonspeed.com/articles/docker-connection-refused/\nhttps://docs.docker.com/network/network-tutorial-host/\n", "I would like to expand on @Bouzid Zitouni's answer. It seems there is indeed an issue with the address(es) the server binds to.\nConnection reset by peer usually indicates that one has defined a port mapping for the container that does not point to a listening server. 
Here is an example to illustrate this:\ndocker run -p 10009:10009 -it ubuntu bash\n\nInstall nmap in container:\napt-get update && apt install -y nmap\n\nRun ncat (localhost only)\n# ncat -v --listen localhost 10009\n...\nNcat: Listening on 127.0.0.1:10009\n\nRun curl on host:\n# curl localhost:10009\ncurl: (56) Recv failure: Connection reset by peer\n\nYou actually get the same result even if you don't have any server process at all.\nRun ncat (all IPs)\n# ncat -v --listen 10009\n...\nNcat: Listening on :::10009\nNcat: Listening on 0.0.0.0:10009\n\nCurl on host connects successfully. Hope that helps.\n", "I faced the same error with the docker container running locally on my machine/laptop.\nI ran multiple containers and was using the same port number say 8080 for each container run.\nAfter killing all docker process and restarting docker i am now able to connect to the container on the mentioned port 8080 in my case.\n$ sudo service docker stop\nWarning: Stopping docker.service, but it can still be activated by:\n docker.socket\n$ sudo service docker start\n\n" ]
[ 75, 23, 14, 0 ]
[]
[]
[ "docker" ]
stackoverflow_0057773604_docker.txt
Q: Is it worth caching auth session in client-side state? I have a React app which uses Supabase for authentication. I'm wondering, is it best practice to store a user's session in client-side state (e.g. setState or the like) in order to avoid making an API call every time a component would like to know if a user is logged in? The isSignedIn function on the auth context currently looks like this. isSignedIn: async () => { // If there's a valid session object, yes, we're logged in if (!!session) return true // If we don't have truthy value for session, check with the server // and return appropriately let { data } = await client.auth.getSession() if (!!(data?.session)){ setSession(data.session) return true } } Though the tricky part here is, as long as there's something in the session state the app considers the user logged in despite the server having potentially ended the session. It seems to me like this is essentially a cache invalidation issue and I'm leaning toward removing the storing of the session in client-side state. Instead I'll just make an API call every time. Should I avoid storing the session in app state in favour of making an API call every time, or is my solution just implemented badly? Also, if all I have now is this async isSignedIn function on my auth context, how do I use it? I'm guessing I can't just use it in the render function of, say, for instance a <ProtectedRoute /> component - return authContext.isSignedIn() ? children : <Navigate to="/sign-in"/> It seems like I'd have to use it within useEffect, storing the result in some state of the component which seems like I'm now back at the start (storing the result in some state). A: Generally speaking you only need to check for authentication when you interact with the server somehow. If you make minor UI changes which dont fetch any new data there shouldn't be any reason to make an API call to check for authentication. 
If your components/code does such a thing its a sign you should rewrite it in a way so it doesn't. Then again if you're making calls to the server for data or have a genuine reason why the server terminating the session should make the user log out immediately or on an event that otherwise would do something that requires being logged in you should most definitely make the API call.
Is it worth caching auth session in client-side state?
I have a React app which uses Supabase for authentication. I'm wondering, is it best practice to store a user's session in client-side state (e.g. setState or the like) in order to avoid making an API call every time a component would like to know if a user is logged in? The isSignedIn function on the auth context currently looks like this. isSignedIn: async () => { // If there's a valid session object, yes, we're logged in if (!!session) return true // If we don't have truthy value for session, check with the server // and return appropriately let { data } = await client.auth.getSession() if (!!(data?.session)){ setSession(data.session) return true } } Though the tricky part here is, as long as there's something in the session state the app considers the user logged in despite the server having potentially ended the session. It seems to me like this is essentially a cache invalidation issue and I'm leaning toward removing the storing of the session in client-side state. Instead I'll just make an API call every time. Should I avoid storing the session in app state in favour of making an API call every time, or is my solution just implemented badly? Also, if all I have now is this async isSignedIn function on my auth context, how do I use it? I'm guessing I can't just use it in the render function of, say, for instance a <ProtectedRoute /> component - return authContext.isSignedIn() ? children : <Navigate to="/sign-in"/> It seems like I'd have to use it within useEffect, storing the result in some state of the component which seems like I'm now back at the start (storing the result in some state).
[ "Generally speaking you only need to check for authentication when you interact with the server somehow.\nIf you make minor UI changes which dont fetch any new data there shouldn't be any reason to make an API call to check for authentication. If your components/code does such a thing its a sign you should rewrite it in a way so it doesn't.\nThen again if you're making calls to the server for data or have a genuine reason why the server terminating the session should make the user log out immediately or on an event that otherwise would do something that requires being logged in you should most definitely make the API call.\n" ]
[ 0 ]
[]
[]
[ "authentication", "caching", "reactjs", "session_state", "supabase" ]
stackoverflow_0074674533_authentication_caching_reactjs_session_state_supabase.txt
Q: Dynamically create form fields and map to data model I am trying to create functionality that maps some input fields to a model however it needs to be dynamic(there can be multiple repetitions of the image I have below). The below image shows the design I am following Explanation of flow I have an input field called "current vehicle instalment" and a check box below it called "I am settling this account". If I click on the check box it will show 2 additional fields called "Account number" and "Financed by". Then I have a button called "Add another vehicle instalment". If I click this button it will add another vehicle input field called "Vehicle instalment 2" as well as another checkbox below it called "I am settling this account". If I click on the check box it will show 2 additional fields called "Account number" and "Financed by" that are related to vehicle 2. I can have a maximum of 3 vehicles so once I add the 3rd vehicle I will no longer see the "Add another vehicle instalment" button Here is my code so far HTML <div> <form-input [dspRestrictedInput]="{type : 'numeric'}" [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjVehicleInstalment" (onChange)="onValueChanged()"></form-input> </div> <div> <form-checkbox [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="settlecheckboxObj"> </form-checkbox> </div> <div *ngIf="showSettlementFields"> <form-input [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjAccountNumber" (onChange)="onValueChanged()"></form-input> </div> <div *ngIf="showSettlementFields"> <form-dropdown [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjFinancedBy" (onChange)="onValueChanged()"> </form-dropdown> </div> <div (click)="addVehicleInstalment($event)"> <a><span class="expense-plus-icon" ></span></a> <h4>Add another vehicle instalment</h4> </div> TS onChangeSettlecheckBox(value) { if (value == true) { 
//show additional fields when check box is selected this.showSettlementFields = true; } } addVehicleInstalment(event) { //add next vehicle this.isVehicleInstalmentClicked = true; } This is an example model that I must map each vehicle object to "existingVehicleAccounts": [ { "instalment": 1000, "settle": true, "accountNumber": "11111", "financedBy": "Bank4" }, { "instalment": 2000, "settle": false, "accountNumber": "22222", "financedBy": "Bank2" }, { "instalment": 3000, "settle": true, "accountNumber": "33333", "financedBy": "Bank1" } ] I am currently trying to add the next vehicle section in my addVehicleInstalment method. Any suggestions on how I can do this. I think I need to use indexes to identify which vehicle is for which object in my model. I have added a very generic stackblitz example with just basic HTML functionality as I currently have it https://stackblitz.com/edit/angular-ngfor-click-index-inhntc?file=src%2Fapp%2Fapp.component.html,src%2Fapp%2Fapp.component.ts A: you should use FormArray, where every item is a FormGroup: ngOnInit() { this.vehicleForm = new FormGroup({ vehicleAccounts: new FormArray([]), }); // start with one empty account this.addInstalment(); } addInstalment() { const vehicleAccount = new FormGroup({ instalment: new FormControl(), settle: new FormControl(false), accountNumber: new FormControl(), financedBy: new FormControl(), }); this.vehicleAccounts.push(vehicleAccount); } Then, you define a getter to access this array in the template: get vehicleAccounts() { return this.vehicleForm.controls['vehicleAccounts'] as FormArray; } On every button click, you add a new FormGroup to the array and let it display via ngFor. And this is the template: <form [formGroup]="vehicleForm" (ngSubmit)="submit()"> <div formArrayName="vehicleAccounts"> <div *ngFor="let vehicleAccountForm of vehicleAccounts.controls; let i = index" > <hr /> <div [formGroup]="vehicleAccountForm"> <div class="form-field"> <label for="current">{{ i === 0 ? 
'current vehicle instalment' : 'vehicle instalment ' + (i + 1) }}</label> <input type="text" id="current" formControlName="instalment" /> </div> <input type="checkbox" id="settling-{{ i }}" formControlName="settle" (change)="changeSettling($event, i)" /> <label for="settling-{{ i }}">I'm settling this account</label> <div *ngIf="vehicleAccountForm.controls['settle'].value"> <div class="form-field"> <label for="accountNumber">account number</label> <input type="text" id="accountNumber" formControlName="accountNumber" /> </div> <div class="form-field"> <label for="financedBy">financed by</label> <select id="financedByy" formControlName="financedBy"> <option value="bank 1">bank 1</option> <option value="bank 2">bank 2</option> <option value="bank 3">bank 3</option> <option value="bank 4">bank 4</option> </select> </div> </div> </div> </div> <hr /> </div> <button (click)="addInstalment()" [disabled]="vehicleAccounts.length === 3" type="button" > add another vehicle instalment </button> <button type="submit">submit</button> </form> Check the stackblitz example here
Dynamically create form fields and map to data model
I am trying to create functionality that maps some input fields to a model however it needs to be dynamic(there can be multiple repetitions of the image I have below). The below image shows the design I am following Explanation of flow I have an input field called "current vehicle instalment" and a check box below it called "I am settling this account". If I click on the check box it will show 2 additional fields called "Account number" and "Financed by". Then I have a button called "Add another vehicle instalment". If I click this button it will add another vehicle input field called "Vehicle instalment 2" as well as another checkbox below it called "I am settling this account". If I click on the check box it will show 2 additional fields called "Account number" and "Financed by" that are related to vehicle 2. I can have a maximum of 3 vehicles so once I add the 3rd vehicle I will no longer see the "Add another vehicle instalment" button Here is my code so far HTML <div> <form-input [dspRestrictedInput]="{type : 'numeric'}" [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjVehicleInstalment" (onChange)="onValueChanged()"></form-input> </div> <div> <form-checkbox [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="settlecheckboxObj"> </form-checkbox> </div> <div *ngIf="showSettlementFields"> <form-input [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjAccountNumber" (onChange)="onValueChanged()"></form-input> </div> <div *ngIf="showSettlementFields"> <form-dropdown [formStatus]="formStatus" [parentFormGroup]="avafIncomeAndExpensesForm" [data]="formObjFinancedBy" (onChange)="onValueChanged()"> </form-dropdown> </div> <div (click)="addVehicleInstalment($event)"> <a><span class="expense-plus-icon" ></span></a> <h4>Add another vehicle instalment</h4> </div> TS onChangeSettlecheckBox(value) { if (value == true) { //show additional fields when check box is selected 
this.showSettlementFields = true; } } addVehicleInstalment(event) { //add next vehicle this.isVehicleInstalmentClicked = true; } This is an example model that I must map each vehicle object to "existingVehicleAccounts": [ { "instalment": 1000, "settle": true, "accountNumber": "11111", "financedBy": "Bank4" }, { "instalment": 2000, "settle": false, "accountNumber": "22222", "financedBy": "Bank2" }, { "instalment": 3000, "settle": true, "accountNumber": "33333", "financedBy": "Bank1" } ] I am currently trying to add the next vehicle section in my addVehicleInstalment method. Any suggestions on how I can do this. I think I need to use indexes to identify which vehicle is for which object in my model. I have added a very generic stackblitz example with just basic HTML functionality as I currently have it https://stackblitz.com/edit/angular-ngfor-click-index-inhntc?file=src%2Fapp%2Fapp.component.html,src%2Fapp%2Fapp.component.ts
[ "you should use FormArray, where every item is a FormGroup:\nngOnInit() {\n this.vehicleForm = new FormGroup({\n vehicleAccounts: new FormArray([]),\n });\n\n // start with one empty account\n this.addInstalment();\n }\n\n addInstalment() {\n const vehicleAccount = new FormGroup({\n instalment: new FormControl(),\n settle: new FormControl(false),\n accountNumber: new FormControl(),\n financedBy: new FormControl(),\n });\n\n this.vehicleAccounts.push(vehicleAccount);\n }\n\nThen, you define a getter to access this array in the template:\nget vehicleAccounts() {\n return this.vehicleForm.controls['vehicleAccounts'] as FormArray;\n }\n\nOn every button click, you add a new FormGroup to the array and let it display via ngFor.\nAnd this is the template:\n\n\n<form [formGroup]=\"vehicleForm\" (ngSubmit)=\"submit()\">\n <div formArrayName=\"vehicleAccounts\">\n <div\n *ngFor=\"let vehicleAccountForm of vehicleAccounts.controls; let i = index\"\n >\n <hr />\n <div [formGroup]=\"vehicleAccountForm\">\n <div class=\"form-field\">\n <label for=\"current\">{{\n i === 0\n ? 
'current vehicle instalment'\n : 'vehicle instalment ' + (i + 1)\n }}</label>\n <input type=\"text\" id=\"current\" formControlName=\"instalment\" />\n </div>\n <input\n type=\"checkbox\"\n id=\"settling-{{ i }}\"\n formControlName=\"settle\"\n (change)=\"changeSettling($event, i)\"\n />\n <label for=\"settling-{{ i }}\">I'm settling this account</label>\n <div *ngIf=\"vehicleAccountForm.controls['settle'].value\">\n <div class=\"form-field\">\n <label for=\"accountNumber\">account number</label>\n <input\n type=\"text\"\n id=\"accountNumber\"\n formControlName=\"accountNumber\"\n />\n </div>\n <div class=\"form-field\">\n <label for=\"financedBy\">financed by</label>\n <select id=\"financedByy\" formControlName=\"financedBy\">\n <option value=\"bank 1\">bank 1</option>\n <option value=\"bank 2\">bank 2</option>\n <option value=\"bank 3\">bank 3</option>\n <option value=\"bank 4\">bank 4</option>\n </select>\n </div>\n </div>\n </div>\n </div>\n <hr />\n </div>\n <button\n (click)=\"addInstalment()\"\n [disabled]=\"vehicleAccounts.length === 3\"\n type=\"button\"\n >\n add another vehicle instalment\n </button>\n <button type=\"submit\">submit</button>\n</form>\n\n\n\nCheck the stackblitz example here\n" ]
[ 1 ]
[]
[]
[ "angular" ]
stackoverflow_0074673173_angular.txt
Q: How to Rank a rows of a table by the highest occurance of one coloumn I have a User table like this in mysql, the referred_by field stands for the User id who referred this user into the platform ** id || first_name || last_name || username || referred_by || enrolled_date** 1 || frank || ed || edward || 3 || 2018-03-28 2 || Michael || Ana || michaelana || 3 || 2018-03-29 3 || Chi || dalu || Dalu || 2 || 2018-03-30 4 || Ema || Eli || Eni || 2 || 2018-04-02 5 || pin || nwa || nwa || 2 || 2018-04-02 What i am trying to do is fetch this data in a way that the users who have referred people the most will come first, for example the user with id 2 has referred 3 people and user id 2 has refered 2 people, so i will like to get the data like this ** id || first_name || last_name || username || referred_by || enrolled_date|| occurrence** 2 || Michael || Ana || michaelana || 3 || 2018-03-29 || 3 3 || Chi || dalu || Dalu || 2 || 2018-03-30 || 2 1 || frank || ed || edward || 3 || 2018-03-28 || 0 4 || Ema || Eli || Eni || 2 || 2018-04-02 || 0 5 || pin || nwa || nwa || 2 || 2018-04-02 || 0 Please i will like to know how i can write this in eloquent I am expecting a result in this order ** id || first_name || last_name || username || referred_by || enrolled_date|| occurrence** 2 || Michael || Ana || michaelana || 3 || 2018-03-29 || 3 3 || Chi || dalu || Dalu || 2 || 2018-03-30 || 2 1 || frank || ed || edward || 3 || 2018-03-28 || 0 4 || Ema || Eli || Eni || 2 || 2018-04-02 || 0 5 || pin || nwa || nwa || 2 || 2018-04-02 || 0 Please i will like to know how i can write this in eloquent A: // Path: app/Models/User.php class User extends Model { public function referredBy() { return $this->belongsTo(User::class, 'referred_by'); } public function referredUsers() { return $this->hasMany(User::class, 'referred_by'); } } // Path: app/Http/Controllers/UserController.php $users = User::withCount('referredUsers')->orderBy('referred_users_count', 'desc')->get();
How to Rank a rows of a table by the highest occurance of one coloumn
I have a User table like this in mysql, the referred_by field stands for the User id who referred this user into the platform ** id || first_name || last_name || username || referred_by || enrolled_date** 1 || frank || ed || edward || 3 || 2018-03-28 2 || Michael || Ana || michaelana || 3 || 2018-03-29 3 || Chi || dalu || Dalu || 2 || 2018-03-30 4 || Ema || Eli || Eni || 2 || 2018-04-02 5 || pin || nwa || nwa || 2 || 2018-04-02 What i am trying to do is fetch this data in a way that the users who have referred people the most will come first, for example the user with id 2 has referred 3 people and user id 2 has refered 2 people, so i will like to get the data like this ** id || first_name || last_name || username || referred_by || enrolled_date|| occurrence** 2 || Michael || Ana || michaelana || 3 || 2018-03-29 || 3 3 || Chi || dalu || Dalu || 2 || 2018-03-30 || 2 1 || frank || ed || edward || 3 || 2018-03-28 || 0 4 || Ema || Eli || Eni || 2 || 2018-04-02 || 0 5 || pin || nwa || nwa || 2 || 2018-04-02 || 0 Please i will like to know how i can write this in eloquent I am expecting a result in this order ** id || first_name || last_name || username || referred_by || enrolled_date|| occurrence** 2 || Michael || Ana || michaelana || 3 || 2018-03-29 || 3 3 || Chi || dalu || Dalu || 2 || 2018-03-30 || 2 1 || frank || ed || edward || 3 || 2018-03-28 || 0 4 || Ema || Eli || Eni || 2 || 2018-04-02 || 0 5 || pin || nwa || nwa || 2 || 2018-04-02 || 0 Please i will like to know how i can write this in eloquent
[ "// Path: app/Models/User.php\nclass User extends Model\n{\n public function referredBy()\n {\n return $this->belongsTo(User::class, 'referred_by');\n }\n\n public function referredUsers()\n {\n return $this->hasMany(User::class, 'referred_by');\n }\n}\n\n// Path: app/Http/Controllers/UserController.php\n$users = User::withCount('referredUsers')->orderBy('referred_users_count', 'desc')->get();\n\n" ]
[ 0 ]
[]
[]
[ "eloquent", "laravel", "mysql" ]
stackoverflow_0074674415_eloquent_laravel_mysql.txt
Q: LLVM IR: returning a value from a conditional block define i32 @func() { entry: %0 = alloca i32, align 4 store i32 12, i32* %0, align 4 %1 = load i32, i32* %0, align 4 %int_gt = icmp sgt i32 %1, 1 %ifcond = icmp ne i1 %int_gt, false br i1 %ifcond, label %consequent, label %otherwise consequent: ; preds = %entry ret i32 10 br label %ifcont otherwise: ; preds = %entry br label %ifcont ifcont: ; preds = %otherwise, %consequent %tmpIf = phi i1 [ true, %consequent ], [ false, %otherwise ] %2 = load i32, i32* %0, align 4 ret i32 %2 } define i32 @main() { entry: %call_func = call i32 @func() ret i32 %call_func } This is the IR I'm generating at the moment. It seems like ret i32 10 is ignored completely. Can anyone explain this? I've tried changing a stored value from within the conditional block and that seems to work just fine. A: Based on the code you have provided, it looks like the func function is intended to check if the value stored in the %0 variable is greater than 1. If it is, the function should return 10, otherwise it should return the value stored in %0.However, there are a few issues with this code that prevent it from working as intended. First, the %int_gt variable is assigned the result of a comparison between the value stored in %0 and 1, but this variable is not used later in the code. Second, the %ifcond variable is assigned the result of a comparison between %int_gt and false, but this variable is also not used later in the code. Third, the consequent block, which is the block that is executed if the value in %0 is greater than 1, contains a ret instruction that returns the value 10, but this return value is not used by the function. Finally, the ifcont block, which is executed after the consequent and otherwise blocks, contains a phi instruction that selects the value of true or false based on which of the two blocks was executed, but this value is not used by the function. 
Overall, the code as written does not correctly implement the intended behavior of the func function. To fix the code, you will need to adjust the conditional and return statements to correctly handle the comparison and return values. Here is one way you can try to fix the code so that it correctly implements the intended behavior of the func function: define i32 @func() { entry: %0 = alloca i32, align 4 store i32 12, i32* %0, align 4 %1 = load i32, i32* %0, align 4 %int_gt = icmp sgt i32 %1, 1 br i1 %int_gt, label %consequent, label %otherwise consequent: ; preds = %entry ret i32 10 otherwise: ; preds = %entry %2 = load i32, i32* %0, align 4 ret i32 %2 } define i32 @main() { entry: %call_func = call i32 @func() ret i32 %call_func } In this version of the code, the %int_gt variable is used in a br instruction to determine which of the consequent or otherwise blocks will be executed. The consequent block now contains a single ret instruction that returns the value 10, and the otherwise block contains a ret instruction that returns the value stored in %0.
LLVM IR: returning a value from a conditional block
define i32 @func() { entry: %0 = alloca i32, align 4 store i32 12, i32* %0, align 4 %1 = load i32, i32* %0, align 4 %int_gt = icmp sgt i32 %1, 1 %ifcond = icmp ne i1 %int_gt, false br i1 %ifcond, label %consequent, label %otherwise consequent: ; preds = %entry ret i32 10 br label %ifcont otherwise: ; preds = %entry br label %ifcont ifcont: ; preds = %otherwise, %consequent %tmpIf = phi i1 [ true, %consequent ], [ false, %otherwise ] %2 = load i32, i32* %0, align 4 ret i32 %2 } define i32 @main() { entry: %call_func = call i32 @func() ret i32 %call_func } This is the IR I'm generating at the moment. It seems like ret i32 10 is ignored completely. Can anyone explain this? I've tried changing a stored value from within the conditional block and that seems to work just fine.
[ "Based on the code you have provided, it looks like the func function is intended to check if the value stored in the %0 variable is greater than 1. If it is, the function should return 10, otherwise it should return the value stored in %0.However, there are a few issues with this code that prevent it from working as intended.\nFirst, the %int_gt variable is assigned the result of a comparison between the value stored in %0 and 1, but this variable is not used later in the code.\nSecond, the %ifcond variable is assigned the result of a comparison between %int_gt and false, but this variable is also not used later in the code.\nThird, the consequent block, which is the block that is executed if the value in %0 is greater than 1, contains a ret instruction that returns the value 10, but this return value is not used by the function.\nFinally, the ifcont block, which is executed after the consequent and otherwise blocks, contains a phi instruction that selects the value of true or false based on which of the two blocks was executed, but this value is not used by the function.\nOverall, the code as written does not correctly implement the intended behavior of the func function. 
To fix the code, you will need to adjust the conditional and return statements to correctly handle the comparison and return values.\nHere is one way you can try to fix the code so that it correctly implements the intended behavior of the func function:\ndefine i32 @func() {\nentry:\n %0 = alloca i32, align 4\n store i32 12, i32* %0, align 4\n %1 = load i32, i32* %0, align 4\n %int_gt = icmp sgt i32 %1, 1\n br i1 %int_gt, label %consequent, label %otherwise\n\nconsequent: ; preds = %entry\n ret i32 10\n\notherwise: ; preds = %entry\n %2 = load i32, i32* %0, align 4\n ret i32 %2\n}\n\ndefine i32 @main() {\nentry:\n %call_func = call i32 @func()\n ret i32 %call_func\n}\n\nIn this version of the code, the %int_gt variable is used in a br instruction to determine which of the consequent or otherwise blocks will be executed. The consequent block now contains a single ret instruction that returns the value 10, and the otherwise block contains a ret instruction that returns the value stored in %0.\n" ]
[ 0 ]
[]
[]
[ "c++", "llvm" ]
stackoverflow_0074674732_c++_llvm.txt
Q: Forward Call In Twilio I am facing an Issue I want to forward call to an agent if not answered then transfer call to next agent but the issue is I don't have first agent number. I have to make call on Ivr and send keys to connect with agent it is working fine. But Issue is if agent not answered call after 4 rings call to another agent . Call is not timeout because that is seem to be answered by IVR and when hang-up status is completed Is there a way to do call forwarding in that way. Here is the code const twiml = new Twilio.twiml.VoiceResponse(); const functionPath = ''; if (event.reason === "dialStatus") { console.log(event.DialCallStatus); if (event.DialCallStatus === "no-answer" || event.DialCallStatus === "busy" || (event.DialCallStatus === "completed")) { console.log('Duration'+event.DialCallDuration); return callback(null, twiml); } else { console.log(event.DialCallDuration); return callback(null, twiml); } } var phonenumber=ph.split('-'); const dialedPartyNumber =ph; var digit='www3' console.log(dialedPartyNumber); console.log(digit); const dial = twiml.dial({timeout:`5`, action: `${functionPath}?reason=dialStatus`,hangupOnStar:true }); dial.number({ sendDigits: digit }, dialedPartyNumber); callback(null, twiml); A: How I've done this before is to put the original call in a conference room. Then call the first agent and ask them to press X to join the conference. If they do not, then go to the second agent and repeat. david A: The problem you are describing isn't really an issue on Twilio, it's actually a question of state management. From your description, it sounds like you are trying to implement an "inbound queue" solution - where multiple agents are "logged-in" to a queue and will receive calls accordingly. If what I described is what you are trying to achieve, then a I would recommend trying something like the below: A call comes into your system and queries a remote script for the first agent. 
The remote script returns a Twiml to route to the relevant agent, with a dial timeout of 8 seconds. The remote script will re-invoked, in case where the call was not answered. When invoked, your server should know that the new invocation is from an existing session - which will respond back with a new agent. Upon answering the call, the answering agent marks the call is answered - making sure step one doesn't return that agent while they are on the phone. Remember, Twilio (and other CPaaS platforms as well) are asynchronous, which means that you will need to manage your call-routing and call-control states yourself.
Forward Call In Twilio
I am facing an Issue I want to forward call to an agent if not answered then transfer call to next agent but the issue is I don't have first agent number. I have to make call on Ivr and send keys to connect with agent it is working fine. But Issue is if agent not answered call after 4 rings call to another agent . Call is not timeout because that is seem to be answered by IVR and when hang-up status is completed Is there a way to do call forwarding in that way. Here is the code const twiml = new Twilio.twiml.VoiceResponse(); const functionPath = ''; if (event.reason === "dialStatus") { console.log(event.DialCallStatus); if (event.DialCallStatus === "no-answer" || event.DialCallStatus === "busy" || (event.DialCallStatus === "completed")) { console.log('Duration'+event.DialCallDuration); return callback(null, twiml); } else { console.log(event.DialCallDuration); return callback(null, twiml); } } var phonenumber=ph.split('-'); const dialedPartyNumber =ph; var digit='www3' console.log(dialedPartyNumber); console.log(digit); const dial = twiml.dial({timeout:`5`, action: `${functionPath}?reason=dialStatus`,hangupOnStar:true }); dial.number({ sendDigits: digit }, dialedPartyNumber); callback(null, twiml);
[ "How I've done this before is to put the original call in a conference room. Then call the first agent and ask them to press X to join the conference. If they do not, then go to the second agent and repeat.\ndavid\n", "The problem you are describing isn't really an issue on Twilio, it's actually a question of state management. From your description, it sounds like you are trying to implement an \"inbound queue\" solution - where multiple agents are \"logged-in\" to a queue and will receive calls accordingly.\nIf what I described is what you are trying to achieve, then a I would recommend trying something like the below:\n\nA call comes into your system and queries a remote script for the first agent.\nThe remote script returns a Twiml to route to the relevant agent, with a dial timeout of 8 seconds.\nThe remote script will re-invoked, in case where the call was not answered. When invoked, your server should know that the new invocation is from an existing session - which will respond back with a new agent.\nUpon answering the call, the answering agent marks the call is answered - making sure step one doesn't return that agent while they are on the phone.\n\nRemember, Twilio (and other CPaaS platforms as well) are asynchronous, which means that you will need to manage your call-routing and call-control states yourself.\n" ]
[ 1, 0 ]
[]
[]
[ "function", "ivr", "twilio", "twilio_twiml" ]
stackoverflow_0074581939_function_ivr_twilio_twilio_twiml.txt
Q: My jump button is not registering properly I have been designing this FPS controller from scratch in unity for a lottle bit now, and I cannot figure out why jumps wont register. everything else is working fine and how it is suppossed to but when I press my jump key it sometimes registers and sometimes just does not. At first I thought it was the way I invoked it, but I changed it and nothing I could think of seemed to work. I also thought it was maybe an issue with my ground check but it was working flawlessly. I thought it was maybe a clipping issue on the colliders, but it was nothing I could see. Its really weird, and I've never had anything happen like this before. When it does register, it works great, and I can even get it to work consistantly for a few consecutive trials before it just stops working again and then cycles to working again. Its weird, can I get some help? Code is here: P.S. everything is properly set up in the inspector. using System.Collections; using System.Collections.Generic; using UnityEngine; using System; public class PlayerMovement_v1 : MonoBehaviour { public Transform orientation; float xpos, zpos, xaxis, zaxis; public PlayerControls data; public Rigidbody rb; Vector3 jumpVec; Vector2 xmove, zmove, movement; public static Action jump; void Start() { rb.gameObject.GetComponent<Rigidbody>(); jump += Jump; } void FixedUpdate() { if (Input.GetKeyDown(KeyCode.Space)) { jump?.Invoke(); } input(); Movement(); } void input() { xpos = Input.GetAxisRaw("Horizontal"); zpos = Input.GetAxisRaw("Vertical"); xaxis = xpos * data.movementSpeed * data.movementMultiplier; zaxis = zpos * data.movementSpeed * data.movementMultiplier; } void Movement() { xmove = new Vector2(xaxis * orientation.right.x, xaxis * orientation.right.z); zmove = new Vector2(zaxis * orientation.forward.x, zaxis * orientation.forward.z); movement = (xmove + zmove).normalized * data.movementSpeed; rb.velocity = new Vector3(movement.x, rb.velocity.y, movement.y); } void Jump() { 
if (Ground_Check.isGrounded) { jumpVec = gameObject.transform.up * data.JumpMagnitude; rb.AddForce(jumpVec, ForceMode.Impulse); } } } A: The issue might be with the jump?.Invoke() line of code in the FixedUpdate function. This line of code checks if jump is null before invoking it, which means that if jump is null, the Jump function will not be called. However, jump is initialized in the Start function with the jump += Jump line of code, so it should not be the case. One possible solution would be to remove the ?. operator from the jump?.Invoke() line of code, so it becomes jump.Invoke(). This will ensure that the Jump function is always called when the Input.GetKeyDown(KeyCode.Space) condition is met. Additionally, it might be worth checking the Ground_Check.isGrounded condition to make sure that it appropriately detects when the player is on the ground, as this could also be causing the issue with jump not registering. If you're ever destroying or disabling the player, don't forget to unsubscribe from the jump action inside OnDisable() or OnDestroy() using jump -= Jump;
My jump button is not registering properly
I have been designing this FPS controller from scratch in unity for a lottle bit now, and I cannot figure out why jumps wont register. everything else is working fine and how it is suppossed to but when I press my jump key it sometimes registers and sometimes just does not. At first I thought it was the way I invoked it, but I changed it and nothing I could think of seemed to work. I also thought it was maybe an issue with my ground check but it was working flawlessly. I thought it was maybe a clipping issue on the colliders, but it was nothing I could see. Its really weird, and I've never had anything happen like this before. When it does register, it works great, and I can even get it to work consistantly for a few consecutive trials before it just stops working again and then cycles to working again. Its weird, can I get some help? Code is here: P.S. everything is properly set up in the inspector. using System.Collections; using System.Collections.Generic; using UnityEngine; using System; public class PlayerMovement_v1 : MonoBehaviour { public Transform orientation; float xpos, zpos, xaxis, zaxis; public PlayerControls data; public Rigidbody rb; Vector3 jumpVec; Vector2 xmove, zmove, movement; public static Action jump; void Start() { rb.gameObject.GetComponent<Rigidbody>(); jump += Jump; } void FixedUpdate() { if (Input.GetKeyDown(KeyCode.Space)) { jump?.Invoke(); } input(); Movement(); } void input() { xpos = Input.GetAxisRaw("Horizontal"); zpos = Input.GetAxisRaw("Vertical"); xaxis = xpos * data.movementSpeed * data.movementMultiplier; zaxis = zpos * data.movementSpeed * data.movementMultiplier; } void Movement() { xmove = new Vector2(xaxis * orientation.right.x, xaxis * orientation.right.z); zmove = new Vector2(zaxis * orientation.forward.x, zaxis * orientation.forward.z); movement = (xmove + zmove).normalized * data.movementSpeed; rb.velocity = new Vector3(movement.x, rb.velocity.y, movement.y); } void Jump() { if (Ground_Check.isGrounded) { jumpVec = 
gameObject.transform.up * data.JumpMagnitude; rb.AddForce(jumpVec, ForceMode.Impulse); } } }
[ "The issue might be with the jump?.Invoke() line of code in the FixedUpdate function. This line of code checks if jump is null before invoking it, which means that if jump is null, the Jump function will not be called. However, jump is initialized in the Start function with the jump += Jump line of code, so it should not be the case.\nOne possible solution would be to remove the ?. operator from the jump?.Invoke() line of code, so it becomes jump.Invoke(). This will ensure that the Jump function is always called when the Input.GetKeyDown(KeyCode.Space) condition is met.\nAdditionally, it might be worth checking the Ground_Check.isGrounded condition to make sure that it appropriately detects when the player is on the ground, as this could also be causing the issue with jump not registering.\nIf you're ever destroying or disabling the player, don't forget to unsubscribe from the jump action inside OnDisable() or OnDestroy() using jump -= Jump;\n" ]
[ 0 ]
[]
[]
[ "c#", "game_development", "input", "registration", "unity3d" ]
stackoverflow_0074673900_c#_game_development_input_registration_unity3d.txt
Q: Why the selected dropdown value is not getting shown? I am building a function on my WordPress plugin where it displays a dropdown of all the available pages. When I click on Save Changes, the value gets saved perfectly in the DB. It updates the value perfectly too. But, the selected value is not being shown in the dropdown. When Save Changes is clicked the value gets saved, but the dropdown again resets to "Choose one". It doesn't get to display the selected option. Am I doing something wrong here? Any guidance is appreciated. <form method=post> <div class="header-right"> <?php $posts = get_pages( array( 'post_status' => 'publish', ) ); ?> <select name="page_for_logged_in" id="page_for_logged_in"> <option selected="selected">Choose one</option> <?php foreach ( $posts as $page ) { ?> <option value="<?php echo esc_attr( $page->post_name ); ?>" <?php selected(get_option('page_for_logged_in'), 'page')?>><?php echo esc_html( $page->post_title ); ?></option> <?php } ?> </select> <?php if(empty($_POST['page_for_logged_in'])) { } else { $myvalue=$_POST['page_for_logged_in']; update_option('page_for_logged_in', $myvalue, $autoload = 'no'); } ?> <?php submit_button(); ?> </p> </br> </br> </form> A: Okay, so I figured out the working solution to my issue. Pasted below; Might be helpful for someone. <form method=post> <div class="header-right"> <?php $posts = get_pages( array( 'post_status' => 'publish', ) ); ?> <?php if(empty($_POST['page_for_logged_in'])) { } else { $myvalue=$_POST['page_for_logged_in']; update_option('page_for_logged_in', $myvalue, $autoload = 'yes'); } ?> <select name="page_for_logged_in" id="page_for_logged_in"> <option value="" disabled selected>Choose one</option> <?php foreach ( $posts as $page ) { ?> <option value="<?php echo esc_attr( $page->post_title ); ?>" <?php echo ( get_option('page_for_logged_in') == $page->post_title ? 
'selected' : '' ); ?>><?php echo esc_html( $page->post_title ); ?></option> <?php } ?> </select> <?php submit_button(); ?> </p> </br> </br> </form>
Why the selected dropdown value is not getting shown?
I am building a function on my WordPress plugin where it displays a dropdown of all the available pages. When I click on Save Changes, the value gets saved perfectly in the DB. It updates the value perfectly too. But, the selected value is not being shown in the dropdown. When Save Changes is clicked the value gets saved, but the dropdown again resets to "Choose one". It doesn't get to display the selected option. Am I doing something wrong here? Any guidance is appreciated. <form method=post> <div class="header-right"> <?php $posts = get_pages( array( 'post_status' => 'publish', ) ); ?> <select name="page_for_logged_in" id="page_for_logged_in"> <option selected="selected">Choose one</option> <?php foreach ( $posts as $page ) { ?> <option value="<?php echo esc_attr( $page->post_name ); ?>" <?php selected(get_option('page_for_logged_in'), 'page')?>><?php echo esc_html( $page->post_title ); ?></option> <?php } ?> </select> <?php if(empty($_POST['page_for_logged_in'])) { } else { $myvalue=$_POST['page_for_logged_in']; update_option('page_for_logged_in', $myvalue, $autoload = 'no'); } ?> <?php submit_button(); ?> </p> </br> </br> </form>
[ "Okay, so I figured out the working solution to my issue. Pasted below; Might be helpful for someone.\n<form method=post>\n <div class=\"header-right\">\n <?php\n $posts = get_pages(\n array(\n 'post_status' => 'publish',\n )\n );\n \n ?>\n <?php\n if(empty($_POST['page_for_logged_in'])) {\n \n } else {\n $myvalue=$_POST['page_for_logged_in'];\n update_option('page_for_logged_in', $myvalue, $autoload = 'yes');\n }\n ?>\n <select name=\"page_for_logged_in\" id=\"page_for_logged_in\">\n <option value=\"\" disabled selected>Choose one</option>\n <?php\n foreach ( $posts as $page ) {\n ?>\n <option value=\"<?php echo esc_attr( $page->post_title ); ?>\" <?php echo ( get_option('page_for_logged_in') == $page->post_title ? 'selected' : '' ); ?>><?php echo esc_html( $page->post_title ); ?></option>\n <?php\n }\n ?>\n </select>\n\n <?php submit_button(); ?>\n\n </p>\n </br>\n </br>\n </form>\n\n" ]
[ 0 ]
[]
[]
[ "dropdown", "frontend", "html", "php", "wordpress" ]
stackoverflow_0074673916_dropdown_frontend_html_php_wordpress.txt
Q: Does DbContext need MemoryCache or Redis in Net Core? We have multiple lookup tables in a SQL Server database. Tables are used in application dropdown menus. AddressType: Home Business ProductType: Books Food Electronics Instead of continually reading database lookup tables, team wants to apply MemoryCache or Redis Cache (to reduce database load, and increase in-memory performance). However, we learned that Entity Framework Core DbContext already has caching. So in this case, would there be any value in caching looking tables, if after a certain amount of reads, the DbContext does this already? Would adding more caching code for MemoryCache/Redis over DbContext help? Or would it be an unnecessary layer? A: There may be some value in using a cache such as MemoryCache or Redis Cache in addition to the built-in caching provided by Entity Framework Core DbContext. This is because the built-in caching in DbContext only applies to the current DbContext instance, whereas a cache such as MemoryCache or Redis Cache can be shared among multiple DbContext instances and even across multiple processes. This means that if you are using multiple DbContext instances in your application, or if you have multiple instances of your application running simultaneously, the built-in caching in DbContext may not provide the same level of performance benefits as using a shared cache like MemoryCache or Redis Cache. In these cases, using a shared cache in addition to the built-in caching in DbContext may provide additional performance benefits. However, it is also important to consider the complexity and maintenance overhead of adding an additional caching layer to your application. If the performance benefits of using a shared cache are not significant, or if the added complexity is not justified by the performance gains, then it may be better to stick with the built-in caching provided by DbContext. 
Overall, the decision to use a shared cache in addition to the built-in caching in DbContext will depend on your specific application and its performance requirements. It may be worth conducting some performance tests to see if using a shared cache provides significant benefits in your particular situation.
Does DbContext need MemoryCache or Redis in Net Core?
We have multiple lookup tables in a SQL Server database. Tables are used in application dropdown menus. AddressType: Home Business ProductType: Books Food Electronics Instead of continually reading database lookup tables, team wants to apply MemoryCache or Redis Cache (to reduce database load, and increase in-memory performance). However, we learned that Entity Framework Core DbContext already has caching. So in this case, would there be any value in caching looking tables, if after a certain amount of reads, the DbContext does this already? Would adding more caching code for MemoryCache/Redis over DbContext help? Or would it be an unnecessary layer?
[ "There may be some value in using a cache such as MemoryCache or Redis Cache in addition to the built-in caching provided by Entity Framework Core DbContext. This is because the built-in caching in DbContext only applies to the current DbContext instance, whereas a cache such as MemoryCache or Redis Cache can be shared among multiple DbContext instances and even across multiple processes.\nThis means that if you are using multiple DbContext instances in your application, or if you have multiple instances of your application running simultaneously, the built-in caching in DbContext may not provide the same level of performance benefits as using a shared cache like MemoryCache or Redis Cache. In these cases, using a shared cache in addition to the built-in caching in DbContext may provide additional performance benefits.\nHowever, it is also important to consider the complexity and maintenance overhead of adding an additional caching layer to your application. If the performance benefits of using a shared cache are not significant, or if the added complexity is not justified by the performance gains, then it may be better to stick with the built-in caching provided by DbContext.\nOverall, the decision to use a shared cache in addition to the built-in caching in DbContext will depend on your specific application and its performance requirements. It may be worth conducting some performance tests to see if using a shared cache provides significant benefits in your particular situation.\n" ]
[ 0 ]
[]
[]
[ ".net", ".net_core", "asp.net_core", "c#", "entity_framework_core" ]
stackoverflow_0058157908_.net_.net_core_asp.net_core_c#_entity_framework_core.txt
Q: Need only parent tag when i parse a html tag using beautifulsoup If i specify li in find_all() method i should only get the parent elements and not all li elements in the html. ofcourse it makes sense that find_all() takes all li into consideration but i can use child or parent in the loop to get the child list elements. I'm trying to parse only the parent tags and print them in a single block. nested li should not be taken into consideration, Please help! <html> <p> something </p> <li> text i need </li> <li> text i need <ol> <li> text i need but appended to parent li tag </li> <li> text i need but appended to parent li tag </li> </ol> </li> <li> text i need </li> when i use soup = BeautifulSoup(fp, 'html.parser') for list in soup.find_all("li"): output_text = list.get_text() print(output_text) print("--sep--") My result should be text i need --sep-- text i need text i need but appended to parent li tag text i need but appended to parent li tag --sep-- text i need --sep-- But my result is text i need --sep-- text i need --sep-- text i need but appended to parent li tag --sep-- text i need but appended to parent li tag --sep-- text i need --sep-- A: Try to use .find_parent to filter-out unwanted <li>: from bs4 import BeautifulSoup html_doc = """\ <html> <p> something </p> <li> text i need </li> <li> text i need <ol> <li> text i need but appended to parent li tag </li> <li> text i need but appended to parent li tag </li> </ol> </li> <li> text i need </li>""" soup = BeautifulSoup(html_doc, "html.parser") for li in soup.select("li"): if li.find_parent("li"): continue print(" ".join(li.text.split())) print("--sep--") Prints: text i need --sep-- text i need text i need but appended to parent li tag text i need but appended to parent li tag --sep-- text i need --sep--
Need only parent tag when i parse a html tag using beautifulsoup
If i specify li in find_all() method i should only get the parent elements and not all li elements in the html. ofcourse it makes sense that find_all() takes all li into consideration but i can use child or parent in the loop to get the child list elements. I'm trying to parse only the parent tags and print them in a single block. nested li should not be taken into consideration, Please help! <html> <p> something </p> <li> text i need </li> <li> text i need <ol> <li> text i need but appended to parent li tag </li> <li> text i need but appended to parent li tag </li> </ol> </li> <li> text i need </li> when i use soup = BeautifulSoup(fp, 'html.parser') for list in soup.find_all("li"): output_text = list.get_text() print(output_text) print("--sep--") My result should be text i need --sep-- text i need text i need but appended to parent li tag text i need but appended to parent li tag --sep-- text i need --sep-- But my result is text i need --sep-- text i need --sep-- text i need but appended to parent li tag --sep-- text i need but appended to parent li tag --sep-- text i need --sep--
[ "Try to use .find_parent to filter-out unwanted <li>:\nfrom bs4 import BeautifulSoup\n\nhtml_doc = \"\"\"\\\n<html>\n<p>\nsomething\n</p>\n<li>\ntext i need\n</li>\n<li>\ntext i need \n <ol>\n <li>\n text i need but appended to parent li tag\n </li>\n <li>\n text i need but appended to parent li tag\n </li>\n </ol>\n</li>\n<li>\ntext i need\n</li>\"\"\"\n\nsoup = BeautifulSoup(html_doc, \"html.parser\")\n\nfor li in soup.select(\"li\"):\n if li.find_parent(\"li\"):\n continue\n print(\" \".join(li.text.split()))\n print(\"--sep--\")\n\nPrints:\ntext i need\n--sep--\ntext i need text i need but appended to parent li tag text i need but appended to parent li tag\n--sep--\ntext i need\n--sep--\n\n" ]
[ 0 ]
[]
[]
[ "beautifulsoup", "html", "python", "python_3.x" ]
stackoverflow_0074674636_beautifulsoup_html_python_python_3.x.txt
Q: How to write all tables from Glue Data Catalog to Amazon S3? I'm trying to read data from an Glue Data Catalog and then write to Amazon S3. However, I was able to retrieve only the first 100 tables from my database, but the total of tables is around 500. Those 100 tables I'm able to write to S3, but not the remaining tables. Here is my code: sc = SparkContext() glueContext = GlueContext(sc) spark = glueContext.spark_session job = Job(glueContext) job.init(args['JOB_NAME'], args) client = boto3.client(service_name='glue', region_name=aws_region) responseGetTables = client.get_tables(DatabaseName=glue_database) tableList = responseGetTables['TableList'] tables = [] for tableDict in tableList: tables.append(tableDict['Name']) for table in tables: datasource = glueContext.create_dynamic_frame.from_catalog(database = glue_database, table_name = table) datasink = glueContext.write_dynamic_frame.from_options(frame = datasource, connection_type = "s3", connection_options = {"path": s3Path + table}, format = target_format) job.commit() I tried to insert the NextToken, but I'm not sure how I can accomplish this. Any help? A: You can paginate through the list of tables in your Glue Data Catalog using the NextToken parameter in the get_tables method. 
Here is how you can do that in your code: client = boto3.client(service_name='glue', region_name=aws_region) # Set the maximum number of tables to retrieve in each call max_results = 100 # Initialize the list of tables tables = [] # Initialize the token to use in the first call next_token = None while True: # Set the parameters for the get_tables call params = { "DatabaseName": glue_database, "MaxResults": max_results, } if next_token: # If there is a token, add it to the parameters params["NextToken"] = next_token # Make the call to get_tables responseGetTables = client.get_tables(**params) # Extract the list of table dictionaries table_list = responseGetTables['TableList'] # Add the names of the tables to the list of tables for table_dict in table_list: tables.append(table_dict['Name']) # Check if there is a next token if "NextToken" in responseGetTables: # If there is a next token, update the token to use in the next call next_token = responseGetTables["NextToken"] else: # If there is no next token, break the loop break # At this point, the tables variable should contain the names of all the tables in the database for table in tables: datasource = glueContext.create_dynamic_frame.from_catalog(database = glue_database, table_name = table) datasink = glueContext.write_dynamic_frame.from_options(frame = datasource, connection_type = "s3", connection_options = {"path": s3Path + table}, format = target_format)
How to write all tables from Glue Data Catalog to Amazon S3?
I'm trying to read data from an Glue Data Catalog and then write to Amazon S3. However, I was able to retrieve only the first 100 tables from my database, but the total of tables is around 500. Those 100 tables I'm able to write to S3, but not the remaining tables. Here is my code: sc = SparkContext() glueContext = GlueContext(sc) spark = glueContext.spark_session job = Job(glueContext) job.init(args['JOB_NAME'], args) client = boto3.client(service_name='glue', region_name=aws_region) responseGetTables = client.get_tables(DatabaseName=glue_database) tableList = responseGetTables['TableList'] tables = [] for tableDict in tableList: tables.append(tableDict['Name']) for table in tables: datasource = glueContext.create_dynamic_frame.from_catalog(database = glue_database, table_name = table) datasink = glueContext.write_dynamic_frame.from_options(frame = datasource, connection_type = "s3", connection_options = {"path": s3Path + table}, format = target_format) job.commit() I tried to insert the NextToken, but I'm not sure how I can accomplish this. Any help?
[ "You can paginate through the list of tables in your Glue Data Catalog using the NextToken parameter in the get_tables method. Here is how you can do that in your code:\nclient = boto3.client(service_name='glue', region_name=aws_region)\n\n# Set the maximum number of tables to retrieve in each call\nmax_results = 100\n\n# Initialize the list of tables\ntables = []\n\n# Initialize the token to use in the first call\nnext_token = None\n\nwhile True:\n # Set the parameters for the get_tables call\n params = {\n \"DatabaseName\": glue_database,\n \"MaxResults\": max_results,\n }\n if next_token:\n # If there is a token, add it to the parameters\n params[\"NextToken\"] = next_token\n\n # Make the call to get_tables\n responseGetTables = client.get_tables(**params)\n\n # Extract the list of table dictionaries\n table_list = responseGetTables['TableList']\n\n # Add the names of the tables to the list of tables\n for table_dict in table_list:\n tables.append(table_dict['Name'])\n\n # Check if there is a next token\n if \"NextToken\" in responseGetTables:\n # If there is a next token, update the token to use in the next call\n next_token = responseGetTables[\"NextToken\"]\n else:\n # If there is no next token, break the loop\n break\n\n# At this point, the tables variable should contain the names of all the tables in the database\n\nfor table in tables:\n datasource = glueContext.create_dynamic_frame.from_catalog(database = glue_database, table_name = table)\n datasink = glueContext.write_dynamic_frame.from_options(frame = datasource, connection_type = \"s3\", connection_options = {\"path\": s3Path + table}, format = target_format)\n\n" ]
[ 0 ]
[]
[]
[ "amazon_s3", "amazon_web_services", "aws_glue" ]
stackoverflow_0074646981_amazon_s3_amazon_web_services_aws_glue.txt
Q: 'Currency' is only available in iOS 16 or newer? After updating Xcode to 14 suddenly I'm unable to build our app, getting the error 'Currency' is only available in iOS 16 or newer but we've been using Currency already as our backend model: struct Currency: Equatable { let code: String let symbol: String let localizedString: String } We getting the error when using Currency here: extension Locale { static let availableCurrencies: [Currency] = Currency.availableCurrencies } A: The reason why the error is newly displaying in xcode 14 is, that Apple added their own struct Currency to Foundation which is only available in iOS 16. In some places it cannot determine which Currency you mean. In our soulution we just renamed our struct Currency to struct CurrenyModel to resolve the naming conflict. You can use F2 (or Right Click -> Refactor -> Rename) when the name is selected to rename all references at once.
'Currency' is only available in iOS 16 or newer?
After updating Xcode to 14 suddenly I'm unable to build our app, getting the error 'Currency' is only available in iOS 16 or newer but we've been using Currency already as our backend model: struct Currency: Equatable { let code: String let symbol: String let localizedString: String } We getting the error when using Currency here: extension Locale { static let availableCurrencies: [Currency] = Currency.availableCurrencies }
[ "The reason why the error is newly displaying in xcode 14 is, that Apple added their own struct Currency to Foundation which is only available in iOS 16. In some places it cannot determine which Currency you mean.\nIn our soulution we just renamed our struct Currency to struct CurrenyModel to resolve the naming conflict. You can use F2 (or Right Click -> Refactor -> Rename) when the name is selected to rename all references at once.\n" ]
[ 1 ]
[ "In my case I was using Language enum like that :-\nenum Language: String { \n case none = \"\"\n case en = \"English\"\n case fr = \"French\"\n case it = \"Italian\"\n case de = \"German\"\n case ar = \"Arabic\"\n\n}\n\nThen I have just renamed that with LanguageModal like that :-\nenum LanguageModal: String {\n case none = \"\"\n case en = \"English\"\n case fr = \"French\"\n case it = \"Italian\"\n case de = \"German\"\n case ar = \"Arabic\"\n}\n\nAnd it works\n" ]
[ -1 ]
[ "ios", "swift", "xcode" ]
stackoverflow_0073743894_ios_swift_xcode.txt
Q: Changing Qt OpenGL Window Example to use OpenGL 3.3 I'm trying to change the Qt OpenGL Example to use more modern opengl, version 330 seems to be suitable. So I did: set the version and profile on main.cpp set version of shaders changed shaders to use uniform It now builds without any errors however I see just a blank window. What am I getting wrong? Perhaps multiple things? main.cpp #include <QGuiApplication> #include "openglwindow.h" int main(int argc, char** argv) { QSurfaceFormat fmt; fmt.setSamples(16); fmt.setDepthBufferSize(24); fmt.setVersion(3, 3); fmt.setProfile(QSurfaceFormat::CoreProfile); QGuiApplication app(argc, argv); OpenGLWindow window; window.setFormat(fmt); window.resize(640, 480); window.show(); window.setAnimating(true); return app.exec(); } openglwindow.h #include <QWindow> #include <QOpenGLFunctions> class QPainter; class QOpenGLContext; class QOpenGLPaintDevice; #include <QOpenGLShaderProgram> #include <QKeyEvent> class OpenGLWindow : public QWindow, protected QOpenGLFunctions { Q_OBJECT bool m_animating = false; QOpenGLContext* m_context = nullptr; QOpenGLPaintDevice* m_device = nullptr; GLint m_posAttr = 0; GLint m_colAttr = 0; GLint m_matrixUniform = 0; QOpenGLShaderProgram* m_program = nullptr; int m_frame = 0; public: explicit OpenGLWindow(QWindow* parent = nullptr); virtual void render(); virtual void initialize(); void setAnimating(bool animating); public slots: void renderNow(); protected: bool event(QEvent* event) override; void exposeEvent(QExposeEvent*) override; void keyPressEvent(QKeyEvent* event) override { switch (event->key()) { case Qt::Key_Escape: close(); break; } } }; openglwindow.cpp #include "openglwindow.h" #include <QOpenGLContext> #include <QOpenGLPaintDevice> #include <QPainter> #include <QGuiApplication> #include <QMatrix4x4> #include <QScreen> #include <QtMath> const char* vertexShaderSource = R"( # version 330 core uniform vec4 posAttr; uniform vec4 colAttr; out vec4 col; uniform mat4 matrix; void main() { 
col = colAttr; gl_Position = matrix * posAttr; } )"; const char* fragmentShaderSource = R"( # version 330 core out vec4 FragColor; in vec4 col; void main() { FragColor = col; } )"; OpenGLWindow::OpenGLWindow(QWindow* parent) : QWindow(parent) { setSurfaceType(QWindow::OpenGLSurface); } void OpenGLWindow::initialize() { m_program = new QOpenGLShaderProgram(this); m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShaderSource); m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShaderSource); m_program->link(); m_posAttr = m_program->uniformLocation("posAttr"); m_colAttr = m_program->uniformLocation("colAttr"); m_matrixUniform = m_program->uniformLocation("matrix"); } void OpenGLWindow::render() { const qreal retinaScale = devicePixelRatio(); glViewport(0, 0, width() * retinaScale, height() * retinaScale); glClear(GL_COLOR_BUFFER_BIT); glClearColor(1.0, 1.0, 1.0, 1.0); m_program->bind(); QMatrix4x4 matrix; matrix.perspective(60.0f, 4.0f / 3.0f, 0.1f, 100.0f); matrix.translate(0, 0, -2); matrix.rotate(100.0f * m_frame / screen()->refreshRate(), 0, 1, 0); m_program->setUniformValue(m_matrixUniform, matrix); static const GLfloat vertices[] = { 0.0f, 0.707f, -0.5f, -0.5f, 0.5f, -0.5f }; static const GLfloat colors[] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; glVertexAttribPointer(m_posAttr, 2, GL_FLOAT, GL_FALSE, 0, vertices); glVertexAttribPointer(m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, colors); glEnableVertexAttribArray(m_posAttr); glEnableVertexAttribArray(m_colAttr); glDrawArrays(GL_TRIANGLES, 0, 3); glDisableVertexAttribArray(m_colAttr); glDisableVertexAttribArray(m_posAttr); m_program->release(); ++m_frame; } bool OpenGLWindow::event(QEvent* event) { switch (event->type()) { case QEvent::UpdateRequest: renderNow(); return true; default: return QWindow::event(event); } } void OpenGLWindow::exposeEvent(QExposeEvent*) { if (isExposed()) renderNow(); } void OpenGLWindow::renderNow() { if (!isExposed()) return; bool 
needsInitialize = false; if (!m_context) { m_context = new QOpenGLContext(this); m_context->setFormat(requestedFormat()); m_context->create(); needsInitialize = true; } m_context->makeCurrent(this); if (needsInitialize) { initializeOpenGLFunctions(); initialize(); } render(); m_context->swapBuffers(this); if (m_animating) requestUpdate(); } void OpenGLWindow::setAnimating(bool animating) { m_animating = animating; } Tried J. M. Arnold suggestion but it led to: QOpenGLFunctions created with non-current context QOpenGLShaderProgram::uniformLocation(posAttr): shader program is not linked QOpenGLShaderProgram::uniformLocation(colAttr): shader program is not linked QOpenGLShaderProgram::uniformLocation(matrix): shader program is not linked ASSERT: "QOpenGLFunctions::isInitialized(d_ptr)" in file /Users/user/Qt/5.15.2/clang_64/lib/QtGui.framework/Headers/qopenglfunctions.h, line 1092 I checked OpenGL version with unsigned int major = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MAJOR); unsigned int minor = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MINOR); cout << "oepngl shader version: " << major << "." << minor << endl; And it prints oepngl shader version: 4.1 I'm on M1 macOS. Running in debug mode I see: got fallback qt version 0x50f02 2022-12-04 20:11:35.615922+0800 openglwindow[61955:13089400] SecTaskLoadEntitlements failed error=22 cs_flags=20, pid=61955 2022-12-04 20:11:35.616020+0800 openglwindow[61955:13089400] SecTaskCopyDebugDescription: openglwindow[61955]/0#-1 LF=0 2022-12-04 20:11:37.052719+0800 openglwindow[61955:13089400] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.052744+0800 openglwindow[61955:13089400] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 
2022-12-04 20:11:37.053344+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.053354+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.194804+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.194832+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.212722+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.212748+0800 openglwindow[61955 :13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.229195+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.229226+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.245219+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.245244+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.262730+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.262760+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.279399+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.279446+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 
2022-12-04 20:11:37.296006+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.296045+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.312713+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.312746+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.329405+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.329449+0800 openglwindow[61955 A: The mistake is inside your eposeEvent: void OpenGLWindow::exposeEvent(QExposeEvent*) { if (isExposed()) renderNow(); } You forgot to initialize the OpenGL context: Call initialize(); and render(); instead of your renderNow();! This should be working finally: void OpenGLWindow::exposeEvent(QExposeEvent*) { if (isExposed()) { initialize(); render(); } } By the way: renderNow() initializes the method OpenGLWindow::render() but it does not initialize the method OpenGLWindow::initialize() (-> OpenGLWindow::initialize() will only be called once when the OpenGLWindow object is first created, but OpenGLWindow::render() will be called repeatedly whenever renderNow() is called)
Changing Qt OpenGL Window Example to use OpenGL 3.3
I'm trying to change the Qt OpenGL Example to use more modern opengl, version 330 seems to be suitable. So I did: set the version and profile on main.cpp set version of shaders changed shaders to use uniform It now builds without any errors however I see just a blank window. What am I getting wrong? Perhaps multiple things? main.cpp #include <QGuiApplication> #include "openglwindow.h" int main(int argc, char** argv) { QSurfaceFormat fmt; fmt.setSamples(16); fmt.setDepthBufferSize(24); fmt.setVersion(3, 3); fmt.setProfile(QSurfaceFormat::CoreProfile); QGuiApplication app(argc, argv); OpenGLWindow window; window.setFormat(fmt); window.resize(640, 480); window.show(); window.setAnimating(true); return app.exec(); } openglwindow.h #include <QWindow> #include <QOpenGLFunctions> class QPainter; class QOpenGLContext; class QOpenGLPaintDevice; #include <QOpenGLShaderProgram> #include <QKeyEvent> class OpenGLWindow : public QWindow, protected QOpenGLFunctions { Q_OBJECT bool m_animating = false; QOpenGLContext* m_context = nullptr; QOpenGLPaintDevice* m_device = nullptr; GLint m_posAttr = 0; GLint m_colAttr = 0; GLint m_matrixUniform = 0; QOpenGLShaderProgram* m_program = nullptr; int m_frame = 0; public: explicit OpenGLWindow(QWindow* parent = nullptr); virtual void render(); virtual void initialize(); void setAnimating(bool animating); public slots: void renderNow(); protected: bool event(QEvent* event) override; void exposeEvent(QExposeEvent*) override; void keyPressEvent(QKeyEvent* event) override { switch (event->key()) { case Qt::Key_Escape: close(); break; } } }; openglwindow.cpp #include "openglwindow.h" #include <QOpenGLContext> #include <QOpenGLPaintDevice> #include <QPainter> #include <QGuiApplication> #include <QMatrix4x4> #include <QScreen> #include <QtMath> const char* vertexShaderSource = R"( # version 330 core uniform vec4 posAttr; uniform vec4 colAttr; out vec4 col; uniform mat4 matrix; void main() { col = colAttr; gl_Position = matrix * posAttr; } )"; 
const char* fragmentShaderSource = R"( # version 330 core out vec4 FragColor; in vec4 col; void main() { FragColor = col; } )"; OpenGLWindow::OpenGLWindow(QWindow* parent) : QWindow(parent) { setSurfaceType(QWindow::OpenGLSurface); } void OpenGLWindow::initialize() { m_program = new QOpenGLShaderProgram(this); m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShaderSource); m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShaderSource); m_program->link(); m_posAttr = m_program->uniformLocation("posAttr"); m_colAttr = m_program->uniformLocation("colAttr"); m_matrixUniform = m_program->uniformLocation("matrix"); } void OpenGLWindow::render() { const qreal retinaScale = devicePixelRatio(); glViewport(0, 0, width() * retinaScale, height() * retinaScale); glClear(GL_COLOR_BUFFER_BIT); glClearColor(1.0, 1.0, 1.0, 1.0); m_program->bind(); QMatrix4x4 matrix; matrix.perspective(60.0f, 4.0f / 3.0f, 0.1f, 100.0f); matrix.translate(0, 0, -2); matrix.rotate(100.0f * m_frame / screen()->refreshRate(), 0, 1, 0); m_program->setUniformValue(m_matrixUniform, matrix); static const GLfloat vertices[] = { 0.0f, 0.707f, -0.5f, -0.5f, 0.5f, -0.5f }; static const GLfloat colors[] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }; glVertexAttribPointer(m_posAttr, 2, GL_FLOAT, GL_FALSE, 0, vertices); glVertexAttribPointer(m_colAttr, 3, GL_FLOAT, GL_FALSE, 0, colors); glEnableVertexAttribArray(m_posAttr); glEnableVertexAttribArray(m_colAttr); glDrawArrays(GL_TRIANGLES, 0, 3); glDisableVertexAttribArray(m_colAttr); glDisableVertexAttribArray(m_posAttr); m_program->release(); ++m_frame; } bool OpenGLWindow::event(QEvent* event) { switch (event->type()) { case QEvent::UpdateRequest: renderNow(); return true; default: return QWindow::event(event); } } void OpenGLWindow::exposeEvent(QExposeEvent*) { if (isExposed()) renderNow(); } void OpenGLWindow::renderNow() { if (!isExposed()) return; bool needsInitialize = false; if (!m_context) { m_context = new 
QOpenGLContext(this); m_context->setFormat(requestedFormat()); m_context->create(); needsInitialize = true; } m_context->makeCurrent(this); if (needsInitialize) { initializeOpenGLFunctions(); initialize(); } render(); m_context->swapBuffers(this); if (m_animating) requestUpdate(); } void OpenGLWindow::setAnimating(bool animating) { m_animating = animating; } Tried J. M. Arnold suggestion but it led to: QOpenGLFunctions created with non-current context QOpenGLShaderProgram::uniformLocation(posAttr): shader program is not linked QOpenGLShaderProgram::uniformLocation(colAttr): shader program is not linked QOpenGLShaderProgram::uniformLocation(matrix): shader program is not linked ASSERT: "QOpenGLFunctions::isInitialized(d_ptr)" in file /Users/user/Qt/5.15.2/clang_64/lib/QtGui.framework/Headers/qopenglfunctions.h, line 1092 I checked OpenGL version with unsigned int major = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MAJOR); unsigned int minor = glfwGetWindowAttrib(window, GLFW_CONTEXT_VERSION_MINOR); cout << "oepngl shader version: " << major << "." << minor << endl; And it prints oepngl shader version: 4.1 I'm on M1 macOS. Running in debug mode I see: got fallback qt version 0x50f02 2022-12-04 20:11:35.615922+0800 openglwindow[61955:13089400] SecTaskLoadEntitlements failed error=22 cs_flags=20, pid=61955 2022-12-04 20:11:35.616020+0800 openglwindow[61955:13089400] SecTaskCopyDebugDescription: openglwindow[61955]/0#-1 LF=0 2022-12-04 20:11:37.052719+0800 openglwindow[61955:13089400] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.052744+0800 openglwindow[61955:13089400] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.053344+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.053354+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. 
defaulting to 60Hz. 2022-12-04 20:11:37.194804+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.194832+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.212722+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.212748+0800 openglwindow[61955 :13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.229195+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.229226+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.245219+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.245244+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.262730+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.262760+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.279399+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.279446+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.296006+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.296045+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 
2022-12-04 20:11:37.312713+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.312746+0800 openglwindow[61955:13089476] [] [0x10c868c20] Bad CurrentVBLDelta for display 2 is zero. defaulting to 60Hz. 2022-12-04 20:11:37.329405+0800 openglwindow[61955:13089476] [] CurrentVBLDelta returned 0 for display 2 -- ignoring unreasonable value 2022-12-04 20:11:37.329449+0800 openglwindow[61955
[ "The mistake is inside your eposeEvent:\nvoid OpenGLWindow::exposeEvent(QExposeEvent*)\n{\n if (isExposed())\n renderNow();\n}\n\nYou forgot to initialize the OpenGL context: Call initialize(); and render(); instead of your renderNow();!\nThis should be working finally:\nvoid OpenGLWindow::exposeEvent(QExposeEvent*)\n{\n if (isExposed()) {\n initialize();\n render();\n }\n}\n\nBy the way: renderNow() initializes the method OpenGLWindow::render() but it does not initialize the method OpenGLWindow::initialize() (-> OpenGLWindow::initialize() will only be called once when the OpenGLWindow object is first created, but OpenGLWindow::render() will be called repeatedly whenever renderNow() is called)\n" ]
[ 0 ]
[]
[]
[ "c++", "glsl", "opengl", "qt", "shader" ]
stackoverflow_0074674766_c++_glsl_opengl_qt_shader.txt
Q: Recognizing matrix from image I have written algorithm that solves the pluszle game matrix. Input is numpy array. Now I want to recognize the digits of matrix from screenshot. there are different levels, this is hard one this is easy one the output of recognition should be numpy array array([[6, 2, 4, 2], [7, 8, 9, 7], [1, 2, 4, 4], [7, 2, 4, 0]]) I have tried to feed last image to tesseract from PIL import Image import pytesseract pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' print(pytesseract.image_to_string(Image.open('C:/Users/79017/screen_plus.jpg'))) The output is unacceptable LEVEL 4 (}00:03 M0 J] —.°—@—@© I think that I should use contours from opencv, because the font is always the same. maybe I should save contours for every digit, than save every countour that exist on screenshot than somehow make matrix from coordinates of every digit-contour. But I have no idea how to do it. A: 1- Binarize Tesseract needs you to binarize the image first. No need for contour or any convolution here. Just a threshold should do. Especially considering that you are trying to che... I mean win intelligently to a specific game. So I guess you are open to some ad-hoc adjustments. For example, (hard<240).any(axis=2) put in white (True) everything that is not white on the original image, and black the white parts. Note that you don't the the sums (or whatever they are, I don't know what this game is) here. Which are on the contrary almost black areas But you can have them with another filter (hard>120).any(axis=2) You could merge those filters, obviously (hard<240).any(axis=2) & (hard>120).any(axis=2) But that may not be a good idea: after all, it gives you an opportunity to distinguish to different kind of data, why you may want to do. 2- Restrict Secondly, you know you are looking for digits, so, restrict to digits. By adding config='digits' to your pytesseract args. 
pytesseract.image_to_string((hard>240).all(axis=2)) # 'LEVEL10\nNOVEMBER 2022\n\n™\noe\nOs\nfoo)\nso\n‘|\noO\n\n9949 6 2 2 8\n\nN W\nN ©\nOo w\nVon\n+? ah ®)\nas\noOo\n©\n\n \n\x0c' pytesseract.image_to_string((hard>240).all(axis=2), config='digits') # '10\n2022\n\n99496228\n\n17\n-\n\n \n\x0c' 3- Don't use image_to_string Use image_to_data preferably. It gives you bounding boxes of text. Or even image_to_boxes which give you digits one by one, with coordinates Because image_to_string is for when you have a good old linear text in the image. image_to_data or image_to_boxes assumes that text is distributed all around, and give you piece of text with position. image_to_string on such image may intervert what you would consider the logical order 4- Select areas yourself Since it is an ad-hoc usage for a specific application, you know where the data are. For example, your main matrix seems to be in area hard[740:1512, 132:910] See print(pytesseract.image_to_boxes((hard[740:1512, 132:910]<240).any(axis=2), config='digits')) Not only it avoids flooding you with irrelevant data. But also, tesseract performs better when called only with an image without other things than what you want to read. Seems to have almost all your digits here. 5- Don't expect for miracles Tesseract is one of the best OCR. But OCR are not a sure thing... See what I get with this code (summarizing what I've said so far) hard=hard[740:1512, 132:910] hard=(hard<240).any(axis=2) boxes=[s.split(' ') for s in pytesseract.image_to_boxes(hard, config='digits').split('\n')[:-1]] out=255*np.stack([hard, hard, hard], axis=2).astype(np.uint8) H=len(hard) for b in boxes: cv2.putText(out, b[0], (30+int(b[1]), H-int(b[2])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2) As you can see, result are fairly good. But there are 5 missing numbers. And one 3 was read as "3.". For this kind of ad-hoc reading of an app, I wouldn't even use tesseract. 
I am pretty sure that, with trial and errors, you can easily learn to extract each digits box your self (there are linearly spaced in both dimension). And then, inside each box, well there are only 9 possible values. Should be quite easy, on a generated image, to find some easy criterions, such as the number of white pixels, number of white pixels in top area, ..., that permits a very simple classification A: You might want to pre-process the image first. By applying a filter, you can, for example, get the contours of an image. The basic idea of a filter, is to 'slide' some matrix of values over the image, and multiply every pixel value by the value inside the matrix. This process is called convolution. Convolution helps out here, because all irrelevant information is discarded, and thus it is made easier for tesseract to 'read' the image. This might help you out: https://medium.com/swlh/image-processing-with-python-convolutional-filters-and-kernels-b9884d91a8fd
Recognizing matrix from image
I have written algorithm that solves the pluszle game matrix. Input is numpy array. Now I want to recognize the digits of matrix from screenshot. there are different levels, this is hard one this is easy one the output of recognition should be numpy array array([[6, 2, 4, 2], [7, 8, 9, 7], [1, 2, 4, 4], [7, 2, 4, 0]]) I have tried to feed last image to tesseract from PIL import Image import pytesseract pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' print(pytesseract.image_to_string(Image.open('C:/Users/79017/screen_plus.jpg'))) The output is unacceptable LEVEL 4 (}00:03 M0 J] —.°—@—@© I think that I should use contours from opencv, because the font is always the same. maybe I should save contours for every digit, than save every countour that exist on screenshot than somehow make matrix from coordinates of every digit-contour. But I have no idea how to do it.
[ "1- Binarize\nTesseract needs you to binarize the image first. No need for contour or any convolution here. Just a threshold should do. Especially considering that you are trying to che... I mean win intelligently to a specific game. So I guess you are open to some ad-hoc adjustments.\nFor example, (hard<240).any(axis=2) put in white (True) everything that is not white on the original image, and black the white parts.\n\nNote that you don't the the sums (or whatever they are, I don't know what this game is) here. Which are on the contrary almost black areas\nBut you can have them with another filter\n(hard>120).any(axis=2)\n\n\nYou could merge those filters, obviously\n(hard<240).any(axis=2) & (hard>120).any(axis=2)\n\n\nBut that may not be a good idea: after all, it gives you an opportunity to distinguish to different kind of data, why you may want to do.\n2- Restrict\nSecondly, you know you are looking for digits, so, restrict to digits. By adding config='digits' to your pytesseract args.\npytesseract.image_to_string((hard>240).all(axis=2))\n# 'LEVEL10\\nNOVEMBER 2022\\n\\n™\\noe\\nOs\\nfoo)\\nso\\n‘|\\noO\\n\\n9949 6 2 2 8\\n\\nN W\\nN ©\\nOo w\\nVon\\n+? ah ®)\\nas\\noOo\\n©\\n\\n \\n\\x0c'\n\npytesseract.image_to_string((hard>240).all(axis=2), config='digits')\n# '10\\n2022\\n\\n99496228\\n\\n17\\n-\\n\\n \\n\\x0c'\n\n3- Don't use image_to_string\nUse image_to_data preferably.\nIt gives you bounding boxes of text.\nOr even image_to_boxes which give you digits one by one, with coordinates\nBecause image_to_string is for when you have a good old linear text in the image. 
image_to_data or image_to_boxes assume that text is distributed all around, and give you pieces of text with positions.\nimage_to_string on such an image may invert what you would consider the logical order\n4- Select areas yourself\nSince it is an ad-hoc usage for a specific application, you know where the data are.\nFor example, your main matrix seems to be in area\nhard[740:1512, 132:910]\n\n\nSee\nprint(pytesseract.image_to_boxes((hard[740:1512, 132:910]<240).any(axis=2), config='digits'))\n\nNot only does it avoid flooding you with irrelevant data. But also, tesseract performs better when called only with an image without other things than what you want to read.\nSeems to have almost all your digits here.\n5- Don't expect miracles\nTesseract is one of the best OCRs. But OCR is not a sure thing...\nSee what I get with this code (summarizing what I've said so far)\nhard=hard[740:1512, 132:910]\nhard=(hard<240).any(axis=2)\nboxes=[s.split(' ') for s in pytesseract.image_to_boxes(hard, config='digits').split('\\n')[:-1]]\nout=255*np.stack([hard, hard, hard], axis=2).astype(np.uint8)\nH=len(hard)\nfor b in boxes:\n cv2.putText(out, b[0], (30+int(b[1]), H-int(b[2])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)\n\n\nAs you can see, results are fairly good. But there are 5 missing numbers. And one 3 was read as \"3.\".\nFor this kind of ad-hoc reading of an app, I wouldn't even use tesseract. I am pretty sure that, with trial and error, you can easily learn to extract each digit's box yourself (they are linearly spaced in both dimensions).\nAnd then, inside each box, well there are only 9 possible values. Should be quite easy, on a generated image, to find some easy criteria, such as the number of white pixels, number of white pixels in the top area, ..., that permit a very simple classification\n", "You might want to pre-process the image first. 
By applying a filter, you can, for example, get the contours of an image.\nThe basic idea of a filter is to 'slide' some matrix of values over the image, and multiply every pixel value by the value inside the matrix. This process is called convolution.\nConvolution helps out here, because all irrelevant information is discarded, thus making it easier for tesseract to 'read' the image.\nThis might help you out: https://medium.com/swlh/image-processing-with-python-convolutional-filters-and-kernels-b9884d91a8fd\n" ]
[ 1, 0 ]
[]
[]
[ "computer_vision", "opencv", "python", "python_tesseract", "tesseract" ]
stackoverflow_0074674268_computer_vision_opencv_python_python_tesseract_tesseract.txt