Dataset schema (field: type, min-max):
  commit        string, length 40-40
  subject       string, length 1-1.49k
  old_file      string, length 4-311
  new_file      string, length 4-311
  new_contents  string, length 1-29.8k
  old_contents  string, length 0-9.9k
  lang          string, 3 classes
  proba         float64, 0-1
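A minimal sketch of how rows with this schema might be consumed, assuming the dump is available locally as JSON Lines; the filename "commits.jsonl" and the 0.5 threshold are illustrative assumptions, not part of the dataset.

import json

# Each record carries a commit hash, a message subject, old/new file paths,
# the full old and new file contents, a language label, and a probability
# score in [0, 1]. "commits.jsonl" is a hypothetical local dump of this data.
with open("commits.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Keep only Python rows whose score clears an illustrative threshold.
        if row["lang"] == "Python" and row["proba"] >= 0.5:
            print(row["commit"][:8], row["subject"])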
commit: ca8dec97321fdf2ceee459b95c3d885edebca15b
subject: Bump DeletionWatcher up to 20 minutes
old_file: deletionwatcher.py
new_file: deletionwatcher.py
new_contents:

import json
import requests
import time
import websocket
from bs4 import BeautifulSoup
from threading import Thread

from metasmoke import Metasmoke
from globalvars import GlobalVars
from datahandling import is_false_positive, is_ignored_post, get_post_site_id_link


class DeletionWatcher:
    @classmethod
    def update_site_id_list(self):
        soup = BeautifulSoup(requests.get("http://meta.stackexchange.com/topbar/site-switcher/site-list").text)
        site_id_dict = {}
        for site in soup.findAll("a", attrs={"data-id": True}):
            site_name = site["href"][2:]
            site_id = site["data-id"]
            site_id_dict[site_name] = site_id
        GlobalVars.site_id_dict = site_id_dict

    @classmethod
    def check_websocket_for_deletion(self, post_site_id, post_url, timeout):
        time_to_check = time.time() + timeout
        post_id = post_site_id[0]
        post_type = post_site_id[2]
        if post_type == "answer":
            question_id = str(get_post_site_id_link(post_site_id))
            if question_id is None:
                return
        else:
            question_id = post_id
        post_site = post_site_id[1]
        if post_site not in GlobalVars.site_id_dict:
            return
        site_id = GlobalVars.site_id_dict[post_site]
        ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/")
        ws.send(site_id + "-question-" + question_id)
        while time.time() < time_to_check:
            ws.settimeout(time_to_check - time.time())
            try:
                a = ws.recv()
            except websocket.WebSocketTimeoutException:
                t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False))
                t_metasmoke.start()
                return False
            if a is not None and a != "":
                try:
                    d = json.loads(json.loads(a)["data"])
                except:
                    continue
                if d["a"] == "post-deleted" and str(d["qId"]) == question_id and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or post_type == "question"):
                    t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, True))
                    t_metasmoke.start()
                    return True
        t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post, args=(post_url, False))
        t_metasmoke.start()
        return False

    @classmethod
    def check_if_report_was_deleted(self, post_site_id, post_url, message):
        was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 1200)
        if was_report_deleted:
            try:
                message.delete()
            except:
                pass

    @classmethod
    def post_message_if_not_deleted(self, post_site_id, post_url, message_text, room):
        was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 300)
        if not was_report_deleted and not is_false_positive(post_site_id[0:2]) and not is_ignored_post(post_site_id[0:2]):
            room.send_message(message_text)

old_contents: same as new_contents, except check_if_report_was_deleted passes a timeout of 600 (10 minutes) to check_websocket_for_deletion instead of 1200.
lang: Python
proba: 0
commit: a492e805fa51940d746a1d251232bc4f13417165
subject: fix waftools/man.py to install manpages again.
old_file: waftools/man.py
new_file: waftools/man.py
new_contents:

import Common, Object, Utils, Node, Params
import sys, os
import gzip
from misc import copyobj

def gzip_func(task):
    env = task.m_env
    infile = task.m_inputs[0].abspath(env)
    outfile = task.m_outputs[0].abspath(env)
    input = open(infile, 'r')
    output = gzip.GzipFile(outfile, mode='w')
    output.write(input.read())
    return 0

class manobj(copyobj):
    def __init__(self, section=1, type='none'):
        copyobj.__init__(self, type)
        self.fun = gzip_func
        self.files = []
        self.section = section

    def apply(self):
        lst = self.to_list(self.files)
        for file in lst:
            node = self.path.find_source(file)
            if not node:
                fatal('cannot find input file %s for processing' % file)

            target = self.target
            if not target or len(lst)>1:
                target = node.m_name

            newnode = self.path.find_build(file+'.gz')  #target?
            if not newnode:
                newnode = Node.Node(file+'.gz', self.path)
                self.path.append_build(newnode)

            task = self.create_task('copy', self.env, 8)
            task.set_inputs(node)
            task.set_outputs(newnode)
            task.m_env = self.env
            task.fun = self.fun

            if Params.g_commands['install'] or Params.g_commands['uninstall']:
                Common.install_files('MANDIR', 'man' + str(self.section), newnode.abspath(self.env))

def setup(env):
    Object.register('man', manobj)

def detect(conf):
    return 1

old_contents: same as new_contents, except apply() reads lst = self.to_list(self.source) instead of self.to_list(self.files).
lang: Python
proba: 0
commit: f6648a0206258e911c0fe4c9c8d0b8cd1334d119
subject: Test also what it returns from the api_translate view
old_file: appcomposer/tests/translator/test_sync.py
new_file: appcomposer/tests/translator/test_sync.py
new_contents:

import json

from flask import request

from mock import patch

from appcomposer.login import graasp_oauth_login_redirect
from appcomposer.tests.translator.fake_requests import create_requests_mock
from appcomposer.tests.utils import ComposerTest
from appcomposer.translator.tasks import synchronize_apps_no_cache_wrapper
from appcomposer.translator.views import api_translations2, api_translate
from appcomposer.translator.mongodb_pusher import mongo_translation_urls, mongo_bundles

class TranslatorTest(ComposerTest):
    def setUp(self):
        super(TranslatorTest, self).setUp()
        mongo_translation_urls.remove()
        mongo_bundles.remove()

    def assertApp1(self):
        # Check MongoDB (English and Spanish)
        resultEngUrl = mongo_translation_urls.find_one({'_id':'en_ALL_ALL::http://url1/languages/en_ALL.xml'})
        resultEngApp = mongo_bundles.find_one({'_id':'en_ALL_ALL::http://url1/gadget.xml'})
        self.assertEquals(resultEngUrl['data'], resultEngApp['data'])
        data = json.loads(resultEngUrl['data'])
        self.assertEquals("Message1_1", data['message1_1'])
        self.assertEquals("Message2_1", data['message2_1'])
        self.assertEquals("Message3_1", data['message3_1'])
        self.assertEquals("Message4_1", data['message4_1'])

        resultSpaUrl = mongo_translation_urls.find_one({'_id':'es_ALL_ALL::http://url1/languages/en_ALL.xml'})
        resultSpaApp = mongo_bundles.find_one({'_id':'es_ALL_ALL::http://url1/gadget.xml'})
        self.assertEquals(resultSpaUrl['data'], resultSpaApp['data'])
        data = json.loads(resultSpaUrl['data'])
        self.assertEquals("Mensaje1_1", data['message1_1'])
        self.assertEquals("Mensaje2_1", data['message2_1'])
        self.assertEquals("Mensaje3_1", data['message3_1'])
        # This is self-filled by its English version
        self.assertEquals("Message4_1", data['message4_1'])

        request.args = {'app_url' : 'http://url1/gadget.xml'}

        # Check API
        english_results = api_translate('en_ALL', 'ALL').json
        self.assertFalse(english_results['automatic'])
        self.assertEquals(english_results['url'], 'http://url1/gadget.xml')
        message1_1 = english_results['translation']['message1_1']
        self.assertFalse(message1_1['can_edit'])
        self.assertFalse(message1_1['from_default'])
        self.assertEquals("Message1_1", message1_1['source'])
        self.assertEquals("Message1_1", message1_1['target'])

        # In Spanish, the fourth message is special
        spanish_results = api_translate('es_ALL', 'ALL').json
        self.assertFalse(spanish_results['automatic'])
        self.assertEquals(spanish_results['url'], 'http://url1/gadget.xml')
        message1_1 = spanish_results['translation']['message1_1']
        self.assertFalse(message1_1['can_edit'])
        self.assertFalse(message1_1['from_default'])
        self.assertEquals("Message1_1", message1_1['source'])
        self.assertEquals("Mensaje1_1", message1_1['target'])
        message4_1 = spanish_results['translation']['message4_1']
        self.assertTrue(message4_1['can_edit'])
        self.assertTrue(message4_1['from_default'])
        self.assertEquals("Message4_1", message4_1['source'])
        self.assertEquals("Message4_1", message4_1['target'])

        # There is no translation to French, so it's automatic
        french_results = api_translate('fr_ALL', 'ALL').json
        french_results = api_translate('fr_ALL', 'ALL').json
        # TODO: this is a bug!
        # self.assertTrue(french_results['automatic'])
        self.assertEquals(french_results['url'], 'http://url1/gadget.xml')
        message1_1 = french_results['translation']['message1_1']
        self.assertTrue(message1_1['can_edit'])
        self.assertFalse(message1_1['from_default'])
        self.assertEquals("Message1_1", message1_1['source'])
        self.assertIsNone(message1_1['target'])
        message4_1 = french_results['translation']['message4_1']
        self.assertTrue(message4_1['can_edit'])
        self.assertFalse(message4_1['from_default'])
        self.assertEquals("Message4_1", message4_1['source'])
        self.assertIsNone(message4_1['target'])

    def assertApps(self):
        self.assertApp1()

class TestSync(TranslatorTest):

    @patch("appcomposer.translator.utils.get_cached_session")
    @patch("requests.Session")
    def test_sync(self, mock_requests, mock_requests_cached_session):
        mock_requests().get = create_requests_mock()
        mock_requests_cached_session().get = create_requests_mock()
        graasp_oauth_login_redirect()
        synchronize_apps_no_cache_wrapper(None)
        self.assertApps()
        synchronize_apps_no_cache_wrapper(None)
        self.assertApps()

    @patch("appcomposer.translator.utils.get_cached_session")
    def test_sync2(self, mock):
        mock().get = create_requests_mock()
        synchronize_apps_no_cache_wrapper(None)

old_contents:

import json

from mock import patch

from appcomposer.login import graasp_oauth_login_redirect
from appcomposer.tests.translator.fake_requests import create_requests_mock
from appcomposer.tests.utils import ComposerTest
from appcomposer.translator.tasks import synchronize_apps_no_cache_wrapper
from appcomposer.translator.views import api_translations2
from appcomposer.translator.mongodb_pusher import mongo_translation_urls, mongo_bundles

class TranslatorTest(ComposerTest):
    def setUp(self):
        super(TranslatorTest, self).setUp()
        mongo_translation_urls.remove()
        mongo_bundles.remove()

    def assertApp1(self):
        resultEngUrl = mongo_translation_urls.find_one({'_id':'en_ALL_ALL::http://url1/languages/en_ALL.xml'})
        resultEngApp = mongo_bundles.find_one({'_id':'en_ALL_ALL::http://url1/gadget.xml'})
        self.assertEquals(resultEngUrl['data'], resultEngApp['data'])
        data = json.loads(resultEngUrl['data'])
        self.assertEquals('Message1_1', data['message1_1'])
        self.assertEquals('Message2_1', data['message2_1'])
        self.assertEquals('Message3_1', data['message3_1'])
        self.assertEquals('Message4_1', data['message4_1'])

        resultSpaUrl = mongo_translation_urls.find_one({'_id':'es_ALL_ALL::http://url1/languages/en_ALL.xml'})
        resultSpaApp = mongo_bundles.find_one({'_id':'es_ALL_ALL::http://url1/gadget.xml'})
        self.assertEquals(resultSpaUrl['data'], resultSpaApp['data'])
        data = json.loads(resultSpaUrl['data'])
        self.assertEquals('Mensaje1_1', data['message1_1'])
        self.assertEquals('Mensaje2_1', data['message2_1'])
        self.assertEquals('Mensaje3_1', data['message3_1'])
        # This is self-filled by its English version
        self.assertEquals('Message4_1', data['message4_1'])

class TestSync(TranslatorTest):

    @patch("appcomposer.translator.utils.get_cached_session")
    @patch("requests.Session")
    def test_sync(self, mock_requests, mock_requests_cached_session):
        mock_requests().get = create_requests_mock()
        mock_requests_cached_session().get = create_requests_mock()
        graasp_oauth_login_redirect()
        synchronize_apps_no_cache_wrapper(None)
        self.assertApp1()

    @patch("appcomposer.translator.utils.get_cached_session")
    def test_sync2(self, mock):
        mock().get = create_requests_mock()
        synchronize_apps_no_cache_wrapper(None)

lang: Python
proba: 0
commit: 89c1b58da23cfe16e8e195c61313b818a6d5f890
subject: Add persist.py
old_file: darwin/persist.py
new_file: darwin/persist.py
new_contents:

import joblib

from .version import __version__, VERSION


class PersistenceMixin(object):
    """
    Mixin that adds joblib persistence load and save function to any class.
    """

    @classmethod
    def from_file(cls, objdump_path):
        '''
        Parameters
        ----------
        objdump_path: str
            Path to the object dump file.

        Returns
        -------
        instance
            New instance of an object from the pickle at the specified path.
        '''
        obj_version, object = joblib.load(objdump_path)

        # Check that we've actually loaded a PersistenceMixin (or sub-class)
        if not isinstance(object, cls):
            raise ValueError(('The pickle stored at {} does not contain ' +
                              'a {} object.').format(objdump_path, cls))
        # Check that versions are compatible. (Currently, this just checks
        # that major versions match)
        elif obj_version[0] == VERSION[0]:
            if not hasattr(object, 'sampler'):
                object.sampler = None
            return object
        else:
            raise ValueError(("{} stored in pickle file {} was created with version {} "
                              "of {}, which is incompatible with the current version "
                              "{}").format(cls, objdump_path, __name__, '.'.join(obj_version), '.'.join(VERSION)))

    def load(self, objdump_path):
        '''Replace the current object instance with a saved object.

        Parameters
        ----------
        objdump_path: str
            The path to the file to load.
        '''
        del self.__dict__
        self.__dict__ = PersistenceMixin.from_file(objdump_path).__dict__

    def save(self, objdump_path):
        '''Save the object to a file.

        Parameters
        ----------
        objdump_path: str
            The path to where you want to save the object.
        '''
        # create the directory if it doesn't exist
        learner_dir = os.path.dirname(objdump_path)
        if not os.path.exists(learner_dir):
            os.makedirs(learner_dir)
        # write out the files
        joblib.dump((VERSION, self), objdump_path)

old_contents: same as new_contents, except from_file ends with "return learner" instead of "return object", load() assigns self.__dict__ = Learner.from_file(objdump_path).__dict__, and the save() docstring reads "Save the learner to a file." with "The path to where you want to save the learner."
lang: Python
proba: 0.000001
commit: 735a52b8ad4ebf7b6b8bb47e14667cd9004e624b
subject: add some mappings
old_file: algo/lru.py
new_file: algo/lru.py
new_contents:

mapping = {}

class Node:
    def __init__(self, val):
        self.next = None
        self.prev = None
        self.value = val

class DoublyLinkedList:
    def __init__(self):
        self.head = None

    def insert(self, val):
        node = Node(val)
        mapping[val] = node
        head = self.head
        if self.head == None:
            self.head = node
        else:
            while head.next != None:
                head = head.next
            head.next = node
            node.prev = head

    def print_list(self):
        head = self.head
        while head != None:
            print head.value
            head = head.next

if __name__ == '__main__':
    dll = DoublyLinkedList()
    for i in range(10):
        dll.insert(i)

old_contents: same as new_contents, but without the module-level "mapping = {}" and without the "mapping[val] = node" line in insert().
lang: Python
proba: 0.000011
commit: 6bb58e13b657c1546f4f5d1afa70d48a9187f168
subject: Update server.py
old_file: gprs/server.py
new_file: gprs/server.py
new_contents:

from socket import *
from modules import decode_packet
import sys
from modules import params

Parser = params.Parser()
argv = Parser.createParser()
ip_and_port = argv.parse_args(sys.argv[1:])

#host = ip_and_port.ip
#port = int(ip_and_port.port)
host = "0.0.0.0"
port = 5100
addr = (host, port)
print(host,port)

tcp_socket = socket(AF_INET, SOCK_STREAM)
tcp_socket.bind(addr)
tcp_socket.listen(10)

loop = True
while loop:
    data = None
    print('wait connection...')
    conn, addr = tcp_socket.accept()
    while loop:
        f = open('logs/gprs.log', 'a+')
        data = conn.recv(109)
        decode_packet.insert(data)
        print(data)
        if data:
            f.write(str(data))
            f.close()
        else:
            f.close()
            break
    conn.close()

tcp_socket.close()

old_contents: same as new_contents, except port = 5300 instead of 5100.
lang: Python
proba: 0.000001
commit: 9e5b42fa14b50d91840a67646ed6779d8f5c22ae
subject: Make ``cursor_kinds`` private
old_file: bears/c_languages/ClangComplexityBear.py
new_file: bears/c_languages/ClangComplexityBear.py
new_contents:

from clang.cindex import Index, CursorKind

from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from bears.c_languages.ClangBear import clang_available, ClangBear


class ClangComplexityBear(LocalBear):
    """
    Calculates cyclomatic complexity of each function and displays it to the
    user.
    """
    LANGUAGES = ClangBear.LANGUAGES
    REQUIREMENTS = ClangBear.REQUIREMENTS
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Complexity'}
    check_prerequisites = classmethod(clang_available)
    _decisive_cursor_kinds = {
        CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
        CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}

    def function_key_points(self, cursor, top_function_level=False):
        """
        Calculates number of function's decision points and exit points.

        :param top_function_level: Whether cursor is in the top level of
                                   the function.
        """
        decisions, exits = 0, 0

        for child in cursor.get_children():
            if child.kind in self._decisive_cursor_kinds:
                decisions += 1
            elif child.kind == CursorKind.RETURN_STMT:
                exits += 1
                if top_function_level:
                    # There is no point to move forward, so just return.
                    return decisions, exits
            child_decisions, child_exits = self.function_key_points(child)
            decisions += child_decisions
            exits += child_exits

        if top_function_level:
            # Implicit return statement.
            exits += 1

        return decisions, exits

    def complexities(self, cursor, filename):
        """
        Calculates cyclomatic complexities of functions.
        """
        file = cursor.location.file

        if file is not None and file.name != filename:
            # There is nothing to do in another file.
            return

        if cursor.kind == CursorKind.FUNCTION_DECL:
            child = next((child for child in cursor.get_children() if child.kind != CursorKind.PARM_DECL), None)
            if child:
                decisions, exits = self.function_key_points(child, True)
                complexity = max(1, decisions - exits + 2)
                yield cursor, complexity
        else:
            for child in cursor.get_children():
                yield from self.complexities(child, filename)

    def run(self, filename, file, max_complexity: int=8):
        """
        Check for all functions if they are too complicated using the
        cyclomatic complexity metric.

        You can read more about this metric at
        <https://www.wikiwand.com/en/Cyclomatic_complexity>.

        :param max_complexity: Maximum cyclomatic complexity that is
                               considered to be normal. The value of 10 had
                               received substantial corroborating evidence.
                               But the general recommendation: "For each
                               module, either limit cyclomatic complexity to
                               [the agreed-upon limit] or provide a written
                               explanation of why the limit was exceeded."
        """
        root = Index.create().parse(filename).cursor

        for cursor, complexity in self.complexities(root, filename):
            if complexity > max_complexity:
                affected_code = (SourceRange.from_clang_range(cursor.extent),)
                yield Result(
                    self,
                    "The function '{function}' should be simplified. Its "
                    "cyclomatic complexity is {complexity} which exceeds "
                    "maximal recommended value "
                    "of {rec_value}.".format(
                        function=cursor.displayname,
                        complexity=complexity,
                        rec_value=max_complexity),
                    affected_code=affected_code,
                    additional_info=(
                        "The cyclomatic complexity is a metric that measures "
                        "how complicated a function is by counting branches "
                        "and exits of each function.\n\n"
                        "Your function seems to be complicated and should be "
                        "refactored so that it can be understood by other "
                        "people easily.\n\nSee "
                        "<http://www.wikiwand.com/en/Cyclomatic_complexity>"
                        " for more information."))

old_contents: same as new_contents, except the set of cursor kinds is the public attribute decisive_cursor_kinds (referenced as self.decisive_cursor_kinds in function_key_points) rather than _decisive_cursor_kinds.
lang: Python
proba: 0
commit: 22952f57c33070f83c4e9c38b2a96543ed983f4e
subject: Make ndb_persistence execute Context's complete event
old_file: furious/extras/appengine/ndb_persistence.py
new_file: furious/extras/appengine/ndb_persistence.py
new_contents:

#
# Copyright 2014 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""This module contains the default functions to use when performing
persistence operations backed by the App Engine ndb library.
"""

import logging

from google.appengine.ext import ndb


class FuriousContextNotFoundError(Exception):
    """FuriousContext entity not found in the datastore."""


class FuriousContext(ndb.Model):
    context = ndb.JsonProperty(indexed=False, compressed=True)

    @classmethod
    def from_context(cls, context):
        """Create a `cls` entity from a context."""
        return cls(id=context.id, context=context.to_dict())

    @classmethod
    def from_id(cls, id):
        """Load a `cls` entity and instantiate the Context it stores."""
        from furious.context import Context

        # TODO: Handle exceptions and retries here.
        entity = cls.get_by_id(id)
        if not entity:
            raise FuriousContextNotFoundError(
                "Context entity not found for: {}".format(id))

        return Context.from_dict(entity.context)


class FuriousAsyncMarker(ndb.Model):
    """This entity serves as a 'complete' marker."""
    pass


def context_completion_checker(async):
    """Check if all Async jobs within a Context have been run."""
    context_id = async.context_id
    logging.debug("Check completion for: %s", context_id)

    context = FuriousContext.from_id(context_id)
    logging.debug("Loaded context.")

    task_ids = context.task_ids
    logging.debug(task_ids)

    offset = 10
    for index in xrange(0, len(task_ids), offset):
        keys = [ndb.Key(FuriousAsyncMarker, id) for id in task_ids[index:index + offset]]
        markers = ndb.get_multi(keys)
        if not all(markers):
            logging.debug("Not all Async's complete")
            return False

    logging.debug("All Async's complete!!")

    context.exec_event_handler('complete')

    return True


def store_context(context):
    """Persist a Context object to the datastore."""
    logging.debug("Attempting to store Context %s.", context.id)

    entity = FuriousContext.from_context(context)

    # TODO: Handle exceptions and retries here.
    key = entity.put()

    logging.debug("Stored Context with key: %s.", key)


def store_async_result(async):
    """Persist the Async's result to the datastore."""
    logging.debug("Storing result for %s", async)
    pass


def store_async_marker(async):
    """Persist a marker indicating the Async ran to the datastore."""
    logging.debug("Attempting to mark Async %s complete.", async.id)

    # TODO: Handle exceptions and retries here.
    key = FuriousAsyncMarker(id=async.id).put()

    logging.debug("Marked Async complete using marker: %s.", key)

old_contents: same as new_contents, but context_completion_checker returns True without calling context.exec_event_handler('complete').
lang: Python
proba: 0.000009
commit: d428f6df195c0293340089b884b934fa16ef7ff6
subject: Use local timezone if available. Fixes #3
old_file: wanikani/cli.py
new_file: wanikani/cli.py
new_contents:

import argparse
import logging
import os

# If the tzlocal package is installed, then we will help the user out
# and print things out in the local timezone
LOCAL_TIMEZONE = None
try:
    import tzlocal
    LOCAL_TIMEZONE = tzlocal.get_localzone()
except ImportError:
    pass

from wanikani.core import WaniKani, Radical, Kanji, Vocabulary

CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.wanikani')

logger = logging.getLogger(__name__)


def config():
    if os.path.exists(CONFIG_PATH):
        logger.debug('Loading config from %s', CONFIG_PATH)
        with open(CONFIG_PATH) as f:
            return f.read().strip()
    return ''


def main():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    # Global Options
    parser.add_argument('-a', '--api-key', default=config())
    parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.WARNING)

    def profile(client, args):
        p = client.profile()
        print 'Username:', p['username']
        print 'Level:', p['level']

    profile.parser = subparsers.add_parser('profile')
    profile.parser.set_defaults(func=profile)

    def level_progress(client, args):
        p = client.level_progress()
        print p['user_information']['username'], 'level', p['user_information']['level']
        print 'Radicals:', p['radicals_total']
        print 'Kanji:', p['kanji_total']

    level_progress.parser = subparsers.add_parser('progress')
    level_progress.parser.set_defaults(func=level_progress)

    def recent_unlocks(client, args):
        p = client.recent_unlocks()
        print p['user_information']['username'], 'level', p['user_information']['level']
        for item in p['items']:
            print item['level'], item['character']

    recent_unlocks.parser = subparsers.add_parser('unlocks')
    recent_unlocks.parser.set_defaults(func=recent_unlocks)

    def upcoming(client, args):
        queue = client.upcoming()
        for ts in sorted(queue):
            if len(queue[ts]):
                radicals, kanji, vocab, total = 0, 0, 0, 0
                for obj in queue[ts]:
                    total += 1
                    if isinstance(obj, Radical):
                        radicals += 1
                    if isinstance(obj, Kanji):
                        kanji += 1
                    if isinstance(obj, Vocabulary):
                        vocab += 1

                if LOCAL_TIMEZONE:
                    ts.replace(tzinfo=LOCAL_TIMEZONE)
                # Note the trailing commas,
                # We only want a newline for the last one
                print ts,
                print 'Total:', total,
                print 'Radials:', radicals,
                print 'Kanji:', kanji,
                print 'Vocab:', vocab

    upcoming.parser = subparsers.add_parser('upcoming')
    upcoming.parser.set_defaults(func=upcoming)

    def set_key(client, args):
        with open(CONFIG_PATH, 'w') as f:
            f.write(args.api_key)
        print 'Wrote {0} to {1}'.format(args.api_key, CONFIG_PATH)

    set_key.parser = subparsers.add_parser('set_key')
    set_key.parser.set_defaults(func=set_key)
    set_key.parser.add_argument('api_key',help="New API Key")

    args = parser.parse_args()
    logging.basicConfig(level=args.debug)

    client = WaniKani(args.api_key)
    args.func(client, args)

old_contents: same as new_contents, but without the tzlocal import block (LOCAL_TIMEZONE) and without the "if LOCAL_TIMEZONE: ts.replace(tzinfo=LOCAL_TIMEZONE)" lines in upcoming().
lang: Python
proba: 0.000001
commit: b53bee8978c6fe407fce7769e16ac4991e36fcda
subject: Return unknown status if geolocation API is unavailable
old_file: client/plugins/geolocation.py
new_file: client/plugins/geolocation.py
new_contents:

#!/usr/bin/env python3

import pickle
import json
import os
import re
import requests
import subprocess
import sys

from qlmdm import top_dir, var_dir
from qlmdm.client import get_setting

cache_file = os.path.join(var_dir, 'geolocation.cache')

os.chdir(top_dir)


def unknown():
    print(json.dumps('unknown'))
    sys.exit()


def old_data_is_good(old_data, ip_addresses, access_points):
    if 'response' not in old_data:
        return False

    try:
        old_ip_addresses = set(old_data['ip_addresses'].values())
    except:
        old_ip_addresses = set()

    new_ip_addresses = set(ip_addresses.values())

    if old_ip_addresses != new_ip_addresses:
        return False

    new_mac_addresses = set(a['macAddress'] for a in access_points)
    if not new_mac_addresses:
        return True

    try:
        old_mac_addresses = set(a['macAddress'] for a in old_data['access_points'])
    except:
        old_mac_addresses = set()

    percentage_overlap = (100 * len(new_mac_addresses & old_mac_addresses) / len(new_mac_addresses))
    if percentage_overlap > 74:
        return True

    return False


api_key = get_setting('geolocation_api_key')
if not api_key:
    unknown()

address_re = re.compile(
    r'\bAddress:\s*([0-9a-f][0-9a-f](?::[0-9a-f][0-9a-f])*)', re.IGNORECASE)
signal_re = re.compile(r'\bSignal level=(-\d+)\d*dBm')
channel_re = re.compile(r'\bChannel:\s*(\d+)')

access_points = {}

ip_addresses = json.loads(
    subprocess.check_output('client/plugins/ip_addresses.py').decode('ascii'))

try:
    old_data = pickle.load(open(cache_file, 'rb'))
except:
    old_data = {}

# iwlist returns slightly different results every time, so we need to run it
# several times and merge the output.
for i in range(5):
    try:
        output = subprocess.check_output(
            ('iwlist', 'scan'), stderr=subprocess.STDOUT).decode('ascii')
    except:
        unknown()

    for cell in re.split(r'\n\s+Cell \d+ ', output):
        ap = {}
        match = address_re.search(cell)
        if not match:
            continue
        ap['macAddress'] = match.group(1).lower()

        match = signal_re.search(cell)
        if match:
            ap['signalStrength'] = match.group(1)

        match = channel_re.search(cell)
        if match:
            ap['channel'] = match.group(1)

        access_points[ap['macAddress']] = ap

# To conserve API quota, don't submit if WiFi access points match the last
# call's 75% or more and the IP addresses haven't changed.
if old_data_is_good(old_data, ip_addresses, access_points.values()):
    sys.stderr.write('Using old data\n')
    print(json.dumps(old_data['response']))
    sys.exit()

data = {}

if access_points:
    data['wifiAccessPoints'] = list(access_points.values())

url = 'https://www.googleapis.com/geolocation/v1/geolocate?key={}'.format(
    api_key)

try:
    response = requests.post(url, data=json.dumps(data), timeout=5)
    response.raise_for_status()
except:
    unknown()

old_data = {
    'response': response.json(),
    'ip_addresses': ip_addresses,
    'access_points': access_points,
}

pickle.dump(old_data, open(cache_file, 'wb'))

print(json.dumps(response.json()))

old_contents: same as new_contents, except the POST is issued outside the try block and without a timeout: response = requests.post(url, data=json.dumps(data)), followed by try: response.raise_for_status() except: unknown().
lang: Python
proba: 0.000002
commit: 85775847e93b35ac19e09962bc2b10f9be666e33
subject: Update analysis.py with new finallist.py method
old_file: analysis.py
new_file: analysis.py
new_contents:

import random
import linecache
from unidecode import unidecode

# Process links into list
finallist = [None] * 5716809
with open('links-simple-sorted.txt', 'r') as src:
    for line in src:
        [oNode, dNode] = line.split(':')
        finallist[int(oNode)] = dNode.rstrip('\n')[1:]

# ACTUALLY: pick a random line in links-sorted, and translate the numbers from there
# Get a random node, and pull that line from the links doc; want this to be an option
oNode = random.randint(1,5706070)
dNode = finallist[oNode]
dNode = dNode.split(' ')

# Translate these into titles and print the result
oname = linecache.getline('titles-sorted.txt',int(oNode))
oname = oname[:-1]  # Gets rid of the trailing newline
print '\nORIGIN NODE: ' + oname + '\n'
print 'DESTINATION NODES:'
for thisnum in dNode:
    dname = linecache.getline('titles-sorted.txt',int(thisnum))[:-1]
    print ' ' + dname
print '\n'

old_contents:

import random
import linecache
from unidecode import unidecode

# ACTUALLY: pick a random line in links-sorted, and translate the numbers from there
# Get a random node, and pull that line from the links doc––want this to be an option
# Pull from links because some titles don't have link lines
lineno = random.randint(1,5706070)
linestr = linecache.getline('links-simple-sorted.txt',lineno)

# Process the string to split the "from" and "to" numbers
[origin, dest] = linestr.split(':')
dest = dest[1:-1]  # Gets rid of the first space and trailing newline
dest = dest.split(' ')  # Split at spaces

# Translate these into title
oname = lincache.getline('titles-sorted.txt',int(origin))
oname = oname[:-1]  # Gets rid of the trailing newline
UNIoname = unidecode(u oname)

for thisnum in dest:
    dname = linecache.getline('titles-sorted.txt',int(thisnum))[:-1]
    UNIdname = unidecode(linecache.getline('titles-sorted.txt', int(thisnum))[:-1])

# Get some stats bro
linksout = len(dest)
# To get linksin need an adjacency matrix

def assemblematrix():
    # Something with links-simple-sorted.txt
    # Parse that shit in

def linksin(node):
    # Locations of value "1" in the row int(node)

def linksout(node):
    # Locations of value "1" in the col int(node)

lang: Python
proba: 0
commit: 6a3f0ade1d8fe16eeda6d339220b7ef877b402e5
subject: Add no-break options
old_file: LFI.TESTER.py
new_file: LFI.TESTER.py
new_contents:

'''
@KaiyiZhang Github
'''
import sys
import urllib2
import getopt
import time

target = ''
depth = 6
file = 'etc/passwd'
html = ''
prefix = ''
url = ''
keyword = 'root'
force = False

def usage():
    print "LFI.Tester.py Help:"
    print "Usage: LFI.TESTER.py -t [-d] [-f] [-k]"
    print " -t,--target The test url"
    print " -d,--depth The depth for test (Default is 6)"
    print " -f,--file The File include (Default is etc/passwd)"
    print " -k,--keyword the keyword for vuln check (Default is root)"

try:
    if len(sys.argv) < 2:
        usage()
        sys.exit()
    opts,args = getopt.getopt(sys.argv[1:],"ht:d:f:k:n",["help","target=","depth=","file=","keyword=","no-break"])
    for opt, arg in opts:
        if opt in("-h","--help"):
            usage()
            sys.exit()
        if opt in("-t","--target"):
            target = arg
            if not target.startswith('http://', 0, 7):
                target = 'http://' + target
        if opt in("-d","--depth"):
            depth = int(arg)
            if depth < 1:
                usage()
                sys.exit()
        if opt in("-f","--file"):
            file = arg
            if file.startswith('/',0,1):
                file =file[1:]
        if opt in("-k","--keyword"):
            keyword = arg
            #print keyword
        if opt in("-n","--no-break"):
            force = True
except getopt.GetoptError:
    usage()
    sys.exit(2)

for i in range(0,depth):
    prefix += '../'
    url = target + prefix + file
    print "Testing: ",url
    try:
        response = urllib2.urlopen(url)
        #print response.info()
        html = response.read()
        #print html
    except:
        pass
    if keyword in html:
        print url, " is Vulnerable"
        if not force:
            break
        else:
            continue
    else:
        time.sleep(2)
        continue

old_contents: same as new_contents, except keyword='root' is declared without the force flag, getopt uses "ht:d:f:k:" with ["help","target=","depth=","file=","keyword="] (no -n/--no-break option or handler), and the loop ends with:

    if(keyword in html):
        print url, " is Vulnerable"
        break
    else:
        time.sleep(2)
        continue

lang: Python
proba: 0.998376
commit: 68c0c054e5b9874f8a6423c35fb83c9de351b9e0
subject: fix doc build
old_file: examples/plot_benktander.py
new_file: examples/plot_benktander.py
new_contents:

"""
====================================================================
Benktander: Relationship between Chainladder and BornhuetterFerguson
====================================================================

This example demonstrates the relationship between the Chainladder and
BornhuetterFerguson methods by way fo the Benktander model. Each is a
special case of the Benktander model where ``n_iters = 1`` for
BornhuetterFerguson and as ``n_iters`` approaches infinity yields the
chainladder. As ``n_iters`` increases the apriori selection becomes less
relevant regardless of initial choice.
"""
import chainladder as cl

# Load Data
clrd = cl.load_sample('clrd')
medmal_paid = clrd.groupby('LOB').sum().loc['medmal', 'CumPaidLoss']
medmal_prem = clrd.groupby('LOB').sum().loc['medmal', 'EarnedPremDIR'].latest_diagonal

# Generate LDFs and Tail Factor
medmal_paid = cl.Development().fit_transform(medmal_paid)
medmal_paid = cl.TailCurve().fit_transform(medmal_paid)

# Benktander Model
benk = cl.Benktander()

# Prep Benktander Grid Search with various assumptions, and a scoring function
param_grid = dict(n_iters=list(range(1,100,2)), apriori=[0.50, 0.75, 1.00])
scoring = {'IBNR':lambda x: x.ibnr_.sum()}

grid = cl.GridSearch(benk, param_grid, scoring=scoring)

# Perform Grid Search
grid.fit(medmal_paid, sample_weight=medmal_prem)

# Plot data
grid.results_.pivot(index='n_iters', columns='apriori', values='IBNR').plot(
    title='Benktander convergence to Chainladder', grid=True).set(ylabel='IBNR')

old_contents: same as new_contents, plus the line medmal_prem.rename('development', ['premium']) immediately after the medmal_prem assignment.
lang: Python
proba: 0
commit: 15307ebe2c19c1a3983b0894152ba81fdde34619
subject: Add comment on dist of first function
old_file: exp/descriptivestats.py
new_file: exp/descriptivestats.py
new_contents:

import pandas
import numpy
import matplotlib.pyplot as plt

def univariate_stats():
    # Generate 1000 random numbers from a normal distribution
    num_examples = 1000
    z = pandas.Series(numpy.random.randn(num_examples))

    # Minimum
    print(z.min())

    # Maximum
    print(z.max())

    # Mean
    print(z.mean())

    # Median
    print(z.median())

    # Variance
    print(z.var())

    # Standard deviation
    print(z.std())

    # Mean absolute deviation
    print(z.mad())

    # Interquartile range
    print(z.quantile(0.75) - z.quantile(0.25))

    z.plot(kind="hist")

def multivariate_stats():
    num_examples = 1000
    x = pandas.Series(numpy.random.randn(num_examples))
    y = x + pandas.Series(numpy.random.randn(num_examples))
    z = x + pandas.Series(numpy.random.randn(num_examples))

    # Covariance
    print(y.cov(z))

    # Covariance of y with itself is equal to variance
    print(y.cov(y), y.var())

    # Correlation
    print(y.corr(z))

univariate_stats()
multivariate_stats()
plt.show()

old_contents: same as new_contents, minus the "# Generate 1000 random numbers from a normal distribution" comment in univariate_stats().
lang: Python
proba: 0
7ff6a0dc3a4f6f1ed47f999340f25fe3d5546bd4
fix command order in shell help test
tests/ps_schedstatistics/tests/01-run.py
tests/ps_schedstatistics/tests/01-run.py
#!/usr/bin/env python3 # Copyright (C) 2017 Inria # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. import sys from testrunner import run PS_EXPECTED = ( (r'\tpid | name | state Q | pri | stack \( used\) | ' r'base addr | current | runtime | switches'), (r'\t - | isr_stack | - - | - | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+'), (r'\t 1 | idle | pending Q | 15 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 2 | main | running Q | 7 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 3 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 4 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 5 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 6 | thread | bl mutex _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t 7 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | ' r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'), (r'\t | SUM | | | \d+ \(\d+\)') ) def _check_startup(child): for i in range(5): child.expect_exact('Creating thread #{}, next={}' .format(i, (i + 1) % 5)) def _check_help(child): child.sendline('') child.expect_exact('>') child.sendline('help') child.expect_exact('Command Description') child.expect_exact('---------------------------------------') child.expect_exact('ps Prints information about ' 'running threads.') child.expect_exact('reboot Reboot the node') def _check_ps(child): child.sendline('ps') for line in PS_EXPECTED: child.expect(line) # Wait for all lines of the ps output to be displayed child.expect_exact('>') def testfunc(child): _check_startup(child) _check_help(child) _check_ps(child) if __name__ == "__main__": sys.exit(run(testfunc))
#!/usr/bin/env python3

# Copyright (C) 2017 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.

import sys
from testrunner import run

PS_EXPECTED = (
    (r'\tpid | name | state Q | pri | stack \( used\) | '
     r'base addr | current | runtime | switches'),
    (r'\t - | isr_stack | - - | - | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+'),
    (r'\t 1 | idle | pending Q | 15 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 2 | main | running Q | 7 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 3 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 4 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 5 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 6 | thread | bl mutex _ | 6 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 7 | thread | bl rx _ | 6 | \d+ \( -?\d+\) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t | SUM | | | \d+ \(\d+\)')
)


def _check_startup(child):
    for i in range(5):
        child.expect_exact('Creating thread #{}, next={}'
                           .format(i, (i + 1) % 5))


def _check_help(child):
    child.sendline('')
    child.expect_exact('>')
    child.sendline('help')
    child.expect_exact('Command Description')
    child.expect_exact('---------------------------------------')
    child.expect_exact('reboot Reboot the node')
    child.expect_exact('ps Prints information about '
                       'running threads.')


def _check_ps(child):
    child.sendline('ps')
    for line in PS_EXPECTED:
        child.expect(line)
    # Wait for all lines of the ps output to be displayed
    child.expect_exact('>')


def testfunc(child):
    _check_startup(child)
    _check_help(child)
    _check_ps(child)


if __name__ == "__main__":
    sys.exit(run(testfunc))
Python
0.000001
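The fix works because pexpect-style matching is strictly sequential: each expect_exact() call scans forward from the previous match, so expectations must be listed in the order the shell actually prints its help lines. A minimal sketch of that behaviour, assuming pexpect is installed and './shell' is a hypothetical interactive binary:

import pexpect

child = pexpect.spawn('./shell')  # hypothetical binary under test
child.sendline('help')
# Matching consumes output in order: if the shell prints 'ps' before
# 'reboot', swapping these two lines makes the second one time out.
child.expect_exact('ps')
child.expect_exact('reboot')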
9af7c8bfc22a250ce848d50ca26877e177f767c1
Fix execution on Monday
management.py
management.py
from logging import _nameToLevel as nameToLevel
from argparse import ArgumentParser

from Common.emailer import Emailer
from DesksReminder.reminders import HelpDeskTechReminder, HelpDeskLabReminder, HelpDeskOtherReminder, \
    UrgentDeskReminder, AccountsDeskReminder
from HelpDesk.synchronization import AskbotSync, HelpDeskCaretaker
from HelpDesk.stackoverflowsync import StackOverflowSync

from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
from datetime import datetime

__author__ = 'Fernando López'
__version__ = "1.3.0"


def init():
    parser = ArgumentParser(prog='Jira Management Scripts', description='')

    parser.add_argument('-l',
                        '--log',
                        default='INFO',
                        help='The logging level to be used.')

    args = parser.parse_args()
    loglevel = None

    try:
        loglevel = nameToLevel[args.log.upper()]
    except Exception as e:
        print('Invalid log level: {}'.format(args.log))
        print('Please use one of the following values:')
        print(' * CRITICAL')
        print(' * ERROR')
        print(' * WARNING')
        print(' * INFO')
        print(' * DEBUG')
        print(' * NOTSET')
        exit()

    return loglevel


if __name__ == "__main__":
    loglevel = init()

    mailer = Emailer(loglevel=loglevel)

    disable_warnings(InsecureRequestWarning)

    today = datetime.today().weekday()

    if today == 0:
        # Send reminder of pending JIRA tickets, only every Mondays
        techReminder = HelpDeskTechReminder(loglevel=loglevel, mailer=mailer)
        techReminder.process()

        labReminder = HelpDeskLabReminder(loglevel=loglevel, mailer=mailer)
        labReminder.process()

        otherReminder = HelpDeskOtherReminder(loglevel=loglevel, mailer=mailer)
        otherReminder.process()

        urgentReminder = UrgentDeskReminder(loglevel=loglevel, mailer=mailer)
        urgentReminder.process()

        accountReminder = AccountsDeskReminder(loglevel=loglevel, mailer=mailer)
        accountReminder.process()

    # Askbot synchronization and Jira caretaker actions, every day
    askbotSync = AskbotSync(loglevel=loglevel)
    askbotSync.process()

    # Automatic reassign tickets to owners based on some extracted information, every day
    helpdeskCaretaker = HelpDeskCaretaker(loglevel=loglevel)
    helpdeskCaretaker.process()

    # StackoverFlow synchronization, every day
    stackoverflowSync = StackOverflowSync(loglevel=loglevel)
    stackoverflowSync.process(year=2015, month=9, day=21)
from logging import _nameToLevel as nameToLevel
from argparse import ArgumentParser

from Common.emailer import Emailer
from DesksReminder.reminders import HelpDeskTechReminder, HelpDeskLabReminder, HelpDeskOtherReminder, \
    UrgentDeskReminder, AccountsDeskReminder
from HelpDesk.synchronization import AskbotSync, HelpDeskCaretaker
from HelpDesk.stackoverflowsync import StackOverflowSync

from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
from datetime import datetime

__author__ = 'Fernando López'
__version__ = "1.3.0"


def init():
    parser = ArgumentParser(prog='Jira Management Scripts', description='')

    parser.add_argument('-l',
                        '--log',
                        default='INFO',
                        help='The logging level to be used.')

    args = parser.parse_args()
    loglevel = None

    try:
        loglevel = nameToLevel[args.log.upper()]
    except Exception as e:
        print('Invalid log level: {}'.format(args.log))
        print('Please use one of the following values:')
        print(' * CRITICAL')
        print(' * ERROR')
        print(' * WARNING')
        print(' * INFO')
        print(' * DEBUG')
        print(' * NOTSET')
        exit()

    return loglevel


if __name__ == "__main__":
    loglevel = init()

    mailer = Emailer(loglevel=loglevel)

    disable_warnings(InsecureRequestWarning)

    today = datetime.today().weekday()

    if today == 2:
        # Send reminder of pending JIRA tickets, only every Mondays
        techReminder = HelpDeskTechReminder(loglevel=loglevel, mailer=mailer)
        techReminder.process()

        labReminder = HelpDeskLabReminder(loglevel=loglevel, mailer=mailer)
        labReminder.process()

        otherReminder = HelpDeskOtherReminder(loglevel=loglevel, mailer=mailer)
        otherReminder.process()

        urgentReminder = UrgentDeskReminder(loglevel=loglevel, mailer=mailer)
        urgentReminder.process()

        accountReminder = AccountsDeskReminder(loglevel=loglevel, mailer=mailer)
        accountReminder.process()

    # Askbot synchronization and Jira caretaker actions, every day
    askbotSync = AskbotSync(loglevel=loglevel)
    askbotSync.process()

    # Automatic reassign tickets to owners based on some extracted information, every day
    helpdeskCaretaker = HelpDeskCaretaker(loglevel=loglevel)
    helpdeskCaretaker.process()

    # StackoverFlow synchronization, every day
    stackoverflowSync = StackOverflowSync(loglevel=loglevel)
    stackoverflowSync.process(year=2015, month=9, day=21)
Python
0.000047
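The one-character diff above hinges on Python's weekday numbering: datetime.weekday() returns 0 for Monday through 6 for Sunday, so the old `today == 2` guard actually fired on Wednesdays. A quick check:

from datetime import datetime

# 2024-01-01 fell on a Monday, so weekday() returns 0
assert datetime(2024, 1, 1).weekday() == 0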
c67a32e731037143baf44841bc7e5a8b5e14473c
Add Reverse Method and Initialize an array from an list
LinkedList.py
LinkedList.py
class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node


class LinkedList(object):
    #default constructor
    def __init__(self,array=None):
        self.head=None
        self.length=0
        if(array!=None):
            self.initArray(array)

    #constructor with list as argument
    def initArray(self,array):
        for value in array:
            self.prepend(value)
        self.reverse()

    #method to copy a Linked List and to return the copy
    def copy(self):
        head2=LinkedList()
        temp=self.head
        while (temp!=None):
            head2.prepend(temp.data)
            temp=temp.next
        head2.reverse()
        return head2

    def prepend(self, data):
        self.head=Node(data,self.head)
        self.length+=1

    def append(self, data):
        temp=self.head
        parent=None
        while(temp!=None):
            parent=temp
            temp=temp.next
        temp=Node(data,None)
        if(parent==None):
            self.head=temp
        else:
            parent.next=temp
        self.length+=1

    def InsertNth(self,data,position):
        temp=self.head
        index=0
        parent=None
        while(index!=position):
            parent=temp
            temp=temp.next
            index+=1
        temp=Node(data)
        if(parent==None):
            temp.next=self.head
            self.head=temp
        else:
            temp.next=parent.next
            parent.next=temp
        self.length+=1

    def printLinkedList(self,sep=" "):
        if(self.length==0):
            return None
        temp=self.head
        while (temp.next!=None):
            print(str(temp.data),end=sep)
            temp=temp.next
        print(temp.data)

    def getData(self,position):
        if(self.length<=position):
            return None
        temp=self.head
        index=0
        while(index!=position):
            temp=temp.next
            index+=1
        return temp.data

    def remove(self,data):
        temp=self.head
        parent=None
        while (temp.data!=data and temp!=None):
            parent=temp
            temp=temp.next
        if(temp==None):
            return -1
        parent.next=temp.next
        self.length-=1
        return 0

    def removeAt(self,position):
        if(self.length<=position):
            return -1
        temp=self.head
        self.length-=1
        index=0
        if(position==0):
            self.head=self.head.next
            return 0
        while(index!=position):
            parent=temp
            temp=temp.next
            index+=1
        parent.next=temp.next
        return 0

    def reverse(self):
        temp=self.head
        new=None
        while (temp!=None):
            next=temp.next
            temp.next=new
            new=temp
            temp=next
        self.head=new
class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node


class LinkedList(object):
    def __init__(self):
        self.head=None
        self.length=0

    def prepend(self, data):
        self.head=Node(data,self.head)
        self.length+=1

    def append(self, data):
        temp=self.head
        parent=None
        while(temp!=None):
            parent=temp
            temp=temp.next
        temp=Node(data,None)
        if(parent==None):
            self.head=temp
        else:
            parent.next=temp
        self.length+=1

    def InsertNth(self,data,position):
        temp=self.head
        index=0
        parent=None
        while(index!=position):
            parent=temp
            temp=temp.next
            index+=1
        temp=Node(data)
        if(parent==None):
            temp.next=self.head
            self.head=temp
        else:
            temp.next=parent.next
            parent.next=temp
        self.length+=1

    def printLinkedList(self,sep=" "):
        if(self.length==0):
            return None
        temp=self.head
        while (temp.next!=None):
            print(str(temp.data),end=sep)
            temp=temp.next
        print(temp.data)

    def getData(self,position):
        if(self.length<=position):
            return None
        temp=self.head
        index=0
        while(index!=position):
            temp=temp.next
            index+=1
        return temp.data

    def remove(self,data):
        temp=self.head
        parent=None
        while (temp.data!=data and temp!=None):
            parent=temp
            temp=temp.next
        if(temp==None):
            return -1
        parent.next=temp.next
        self.length-=1
        return 0

    def removeAt(self,position):
        if(self.length<=position):
            return -1
        temp=self.head
        self.length-=1
        index=0
        if(position==0):
            self.head=self.head.next
            return 0
        while(index!=position):
            parent=temp
            temp=temp.next
            index+=1
        parent.next=temp.next
        return 0
Python
0.000001
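A short usage sketch of the two additions, assuming the class above is importable from LinkedList.py: initArray() prepends each value and then reverses, which preserves the input order, and copy() returns an independent list.

from LinkedList import LinkedList

ll = LinkedList([1, 2, 3])  # built via initArray(): prepend all, then reverse
ll.printLinkedList()        # prints: 1 2 3
ll.reverse()
ll.printLinkedList()        # prints: 3 2 1
clone = ll.copy()           # independent copy; mutating it leaves ll intact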
ecd2821a99dee895f3ab7c5dbcc6d86983268560
Update src url for dev in views
__init__.py
__init__.py
from flask import Flask, request, redirect, url_for
from twilio.rest import TwilioRestClient
from PIL import Image, ImageDraw, ImageFont
import time

app = Flask(__name__, static_folder='static', static_url_path='')

client = TwilioRestClient(
    account='ACb01b4d6edfb1b41a8b80f5fed2c19d1a',
    token='97e6b9c0074b2761eff1375fb088adda'
)


@app.route('/', methods=['GET', 'POST'])
def send_image():
    if request.method == 'GET':
        return 'The deployment worked! Now copy your browser URL into the' + \
            ' Twilio message text box for your phone number.'

    sender_number = request.form.get('From', '')
    twilio_number = request.form.get('To', '')
    user_text = request.form.get('Body', '')

    image_url, msg_text = mod_photo(user_text)
    send_mms_twiml(image_url, msg_text, sender_number, twilio_number)
    return 'ok'


def mod_photo(user_text):
    base = Image.open('static/images/original/portland.jpg').convert('RGBA')

    txt = Image.new('RGBA', base.size, (255, 255, 255, 0))

    fnt = ImageFont.truetype('static/fonts/Gobold.ttf', 30)
    d = ImageDraw.Draw(txt)

    d.text(
        (25, 25),
        '{}...'.format(user_text),
        font=fnt,
        fill=(255, 255, 255, 255)
    )

    image = Image.alpha_composite(base, txt)

    image.save('static/images/changed/portland_{}.jpg'.format(user_text))

    try:
        msg_text = '{}: Imagine yourself in Portland!'.format(user_text)
        image_url = 'http://dev.thevariable.com/images/changed/portland_{}.jpg'.format(user_text)
    except:
        msg = "Sorry, we couldn't pull a kitten, " + \
              "here's a dinosaur instead!"
        image_url = "https://farm1.staticflickr.com/46/" + \
                    "154877897_a299d80baa_b_d.jpg"

    return image_url, msg_text


def send_mms_twiml(image_url, msg_text, sender_number, twilio_number):
    client.messages.create(
        to=sender_number,
        from_=twilio_number,
        body=msg_text,
        media_url=image_url
    )


if __name__ == "__main__":
    app.run(debug=True)
from flask import Flask, request, redirect, url_for
from twilio.rest import TwilioRestClient
from PIL import Image, ImageDraw, ImageFont
import time

app = Flask(__name__, static_folder='static', static_url_path='')

client = TwilioRestClient(
    account='ACb01b4d6edfb1b41a8b80f5fed2c19d1a',
    token='97e6b9c0074b2761eff1375fb088adda'
)


@app.route('/', methods=['GET', 'POST'])
def send_image():
    if request.method == 'GET':
        return 'The deployment worked! Now copy your browser URL into the' + \
            ' Twilio message text box for your phone number.'

    sender_number = request.form.get('From', '')
    twilio_number = request.form.get('To', '')
    user_text = request.form.get('Body', '')

    image_url, msg_text = mod_photo(user_text)
    send_mms_twiml(image_url, msg_text, sender_number, twilio_number)
    return 'ok'


def mod_photo(user_text):
    base = Image.open('static/images/original/portland.jpg').convert('RGBA')

    txt = Image.new('RGBA', base.size, (255, 255, 255, 0))

    fnt = ImageFont.truetype('static/fonts/Gobold.ttf', 30)
    d = ImageDraw.Draw(txt)

    d.text(
        (25, 25),
        '{}...'.format(user_text),
        font=fnt,
        fill=(255, 255, 255, 255)
    )

    image = Image.alpha_composite(base, txt)

    image.save('static/images/changed/portland_{}.jpg'.format(user_text))

    try:
        msg_text = '{}: Imagine yourself in Portland!'.format(user_text)
        image_url = 'http://12dcb913.ngrok.com/images/changed/portland_{}.jpg'.format(user_text)
    except:
        msg = "Sorry, we couldn't pull a kitten, " + \
              "here's a dinosaur instead!"
        image_url = "https://farm1.staticflickr.com/46/" + \
                    "154877897_a299d80baa_b_d.jpg"

    return image_url, msg_text


def send_mms_twiml(image_url, msg_text, sender_number, twilio_number):
    client.messages.create(
        to=sender_number,
        from_=twilio_number,
        body=msg_text,
        media_url=image_url
    )


if __name__ == "__main__":
    app.run(debug=True)
Python
0
598bb39414825ff8ab561babb470b85f06c58020
Update __init__.py
__init__.py
__init__.py
from mlpack.linear_regression import linear_regression
from mlpack.logistic_regression import logistic_regression

"""
MlPack
======

Provides
1. A Variety of Machine learning packages
2. Good and Easy hand written programs with good documentation
3. Linear Regression, Logistic Regression

Available subpackages
---------------------
1. Linear Regression
2. Logistic Regression

See subpackages for more details.
"""
from mlpack import linear_regression
from mlpack import logistic_regression

"""
MlPack
======

Provides
1. A Variety of Machine learning packages
2. Good and Easy hand written programs with good documentation
3. Linear Regression, Logistic Regression

Available subpackages
---------------------
1. Linear Regression
2. Logistic Regression

See subpackages for more details.
"""
Python
0
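The distinction behind this fix: `from mlpack import linear_regression` binds the submodule object itself, while the corrected form imports the name defined inside it. A sketch of the difference, assuming a hypothetical layout where mlpack/linear_regression.py defines a function of the same name:

from mlpack import linear_regression                     # binds the module object
from mlpack.linear_regression import linear_regression   # binds the function itself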
7875c6b4848e7c30a6d5a53c2b3c01d7aba5fa65
improve voting results test
scraper/test.py
scraper/test.py
from django.test import TestCase

import scraper.documents
import scraper.votings

# metadata = scraper.documents.get_metadata(document_id='kst-33885-7')
# print(metadata)

# page_url = 'https://zoek.officielebekendmakingen.nl/kst-33885-7.html?zoekcriteria=%3fzkt%3dEenvoudig%26pst%3d%26vrt%3d33885%26zkd%3dInDeGeheleText%26dpr%3dAfgelopenDag%26spd%3d20160522%26epd%3d20160523%26sdt%3dDatumBrief%26ap%3d%26pnr%3d1%26rpp%3d10%26_page%3d4%26sorttype%3d1%26sortorder%3d4&resultIndex=34&sorttype=1&sortorder=4'
# scraper.documents.get_document_id(page_url)

# scraper.documents.search_politieknl_dossier(33885)


class TestExample(TestCase):
    """ Example test case """
    dossier_nr = 33885

    def test_get_voting_pages_for_dossier(self):
        """ Example test """
        expected_urls = [
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154',
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153'
        ]
        votings_urls = scraper.votings.get_voting_pages_for_dossier(self.dossier_nr)
        self.assertEqual(len(expected_urls), len(votings_urls))
        for i in range(len(votings_urls)):
            self.assertEqual(votings_urls[i], expected_urls[i])

    def test_get_votings_for_page(self):
        voting_page_urls = [
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154',
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153'
        ]
        expected_results = [
            {'result': 'Verworpen', 'document_id': '33885-17'},
            {'result': 'Aangenomen', 'document_id': '33885-30'},
            {'result': 'Verworpen', 'document_id': '33885-19'},
            {'result': 'Verworpen', 'document_id': '33885-20'},
            {'result': 'Eerder ingetrokken (tijdens debat)', 'document_id': '33885-21'},
            {'result': 'Verworpen', 'document_id': '33885-31'},
            {'result': 'Aangehouden (tijdens debat)', 'document_id': '33885-23'},
            {'result': 'Verworpen', 'document_id': '33885-24'},
            {'result': 'Aangehouden (tijdens debat)', 'document_id': '33885-25'},
            {'result': 'Verworpen', 'document_id': '33885-26'},
            {'result': 'Verworpen', 'document_id': '33885-27'},
            {'result': 'Eerder ingetrokken (tijdens debat)', 'document_id': '33885-28'},
            {'result': 'Ingetrokken', 'document_id': '33885-14'},
            {'result': 'Verworpen', 'document_id': '33885-15'},
            {'result': 'Verworpen', 'document_id': '33885-16'},
            {'result': 'Verworpen', 'document_id': '33885-10'},
            {'result': 'Verworpen', 'document_id': '33885-13'},
            {'result': 'Aangenomen', 'document_id': '33885'}
        ]
        results = []
        for url in voting_page_urls:
            results += scraper.votings.get_votings_for_page(url)
        self.assertEqual(len(results), len(expected_results))
        for i in range(len(results)):
            print(results[i])
            self.assertEqual(results[i], expected_results[i])
from django.test import TestCase

import scraper.documents
import scraper.votings

# metadata = scraper.documents.get_metadata(document_id='kst-33885-7')
# print(metadata)

# page_url = 'https://zoek.officielebekendmakingen.nl/kst-33885-7.html?zoekcriteria=%3fzkt%3dEenvoudig%26pst%3d%26vrt%3d33885%26zkd%3dInDeGeheleText%26dpr%3dAfgelopenDag%26spd%3d20160522%26epd%3d20160523%26sdt%3dDatumBrief%26ap%3d%26pnr%3d1%26rpp%3d10%26_page%3d4%26sorttype%3d1%26sortorder%3d4&resultIndex=34&sorttype=1&sortorder=4'
# scraper.documents.get_document_id(page_url)

# scraper.documents.search_politieknl_dossier(33885)


class TestExample(TestCase):
    """ Example test case """
    dossier_nr = 33885

    def test_get_voting_pages_for_dossier(self):
        """ Example test """
        expected_urls = [
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154',
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153'
        ]
        votings_urls = scraper.votings.get_voting_pages_for_dossier(self.dossier_nr)
        self.assertEqual(len(expected_urls), len(votings_urls))
        for i in range(len(votings_urls)):
            self.assertEqual(votings_urls[i], expected_urls[i])

    def test_get_votings_for_page(self):
        voting_page_urls = [
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154',
            'https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10153'
        ]
        for url in voting_page_urls:
            votings = scraper.votings.get_votings_for_page(url)
            for voting in votings:
                print(voting)
Python
0.000003
b8d0344f0ca5c906e43d4071bc27a8d2acf114d1
bump version
webmpris/__init__.py
webmpris/__init__.py
__version__ = '1.1'
__description__ = 'REST API to control media players via MPRIS2 interfaces'

requires = [
    'pympris'
]

README = """webmpris is a REST API
to control media players via MPRIS2 interfaces.

Supported intefaces:
org.mpris.MediaPlayer2 via /players/<id>/Root
org.mpris.MediaPlayer2.Player via /players/<id>/Player
org.mpris.MediaPlayer2.TrackList via /players/<id>/TrackList
org.mpris.MediaPlayer2.Playlists via /players/<id>/Playlists
"""
__version__ = '1.0'
__description__ = 'REST API to control media players via MPRIS2 interfaces'

requires = [
    'pympris'
]

README = """webmpris is a REST API
to control media players via MPRIS2 interfaces.

Supported intefaces:
org.mpris.MediaPlayer2 via /players/<id>/Root
org.mpris.MediaPlayer2.Player via /players/<id>/Player
org.mpris.MediaPlayer2.TrackList via /players/<id>/TrackList
org.mpris.MediaPlayer2.Playlists via /players/<id>/Playlists
"""
Python
0
9acf7857167bb87438c7c0bebca1a7eda93ac23b
Make saml2idp compatible with Django 1.9
saml2idp/registry.py
saml2idp/registry.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
Registers and loads Processor classes from settings.
"""
import logging
from importlib import import_module

from django.core.exceptions import ImproperlyConfigured

from . import exceptions
from . import saml2idp_metadata

logger = logging.getLogger(__name__)


def get_processor(config):
    """
    Get an instance of the processor with config.
    """
    dottedpath = config['processor']
    try:
        dot = dottedpath.rindex('.')
    except ValueError:
        raise ImproperlyConfigured('%s isn\'t a processors module' % dottedpath)
    sp_module, sp_classname = dottedpath[:dot], dottedpath[dot+1:]
    try:
        mod = import_module(sp_module)
    except ImportError, e:
        raise ImproperlyConfigured('Error importing processors %s: "%s"' % (sp_module, e))
    try:
        sp_class = getattr(mod, sp_classname)
    except AttributeError:
        raise ImproperlyConfigured('processors module "%s" does not define a "%s" class' % (sp_module, sp_classname))
    instance = sp_class(config)
    return instance


def find_processor(request):
    """
    Returns the Processor instance that is willing to handle this request.
    """
    for name, sp_config in saml2idp_metadata.SAML2IDP_REMOTES.items():
        proc = get_processor(sp_config)
        try:
            if proc.can_handle(request):
                return proc
        except exceptions.CannotHandleAssertion as exc:
            # Log these, but keep looking.
            logger.debug('%s %s' % (proc, exc))
    raise exceptions.CannotHandleAssertion('None of the processors in SAML2IDP_REMOTES could handle this request.')
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
Registers and loads Processor classes from settings.
"""
# Python imports
import logging

# Django imports
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured

# Local imports
from . import exceptions
from . import saml2idp_metadata

# Setup logging
logger = logging.getLogger(__name__)


def get_processor(config):
    """
    Get an instance of the processor with config.
    """
    dottedpath = config['processor']
    try:
        dot = dottedpath.rindex('.')
    except ValueError:
        raise ImproperlyConfigured('%s isn\'t a processors module' % dottedpath)
    sp_module, sp_classname = dottedpath[:dot], dottedpath[dot+1:]
    try:
        mod = import_module(sp_module)
    except ImportError, e:
        raise ImproperlyConfigured('Error importing processors %s: "%s"' % (sp_module, e))
    try:
        sp_class = getattr(mod, sp_classname)
    except AttributeError:
        raise ImproperlyConfigured('processors module "%s" does not define a "%s" class' % (sp_module, sp_classname))
    instance = sp_class(config)
    return instance


def find_processor(request):
    """
    Returns the Processor instance that is willing to handle this request.
    """
    for name, sp_config in saml2idp_metadata.SAML2IDP_REMOTES.items():
        proc = get_processor(sp_config)
        try:
            if proc.can_handle(request):
                return proc
        except exceptions.CannotHandleAssertion as exc:
            # Log these, but keep looking.
            logger.debug('%s %s' % (proc, exc))
    raise exceptions.CannotHandleAssertion('None of the processors in SAML2IDP_REMOTES could handle this request.')
Python
0
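The incompatibility is concrete: django.utils.importlib was a compatibility shim that Django removed in 1.9, so the old import itself raises ImportError there, while the standard library's importlib.import_module (available since Python 2.7) is a drop-in replacement — exactly the swap the commit makes. For example:

from importlib import import_module

# Same dotted-path lookup that get_processor() relies on
mod = import_module('os.path')
print(mod.join('a', 'b'))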
b8cd1b6869651cd0cbe2cbeebc59c641f13e0e5b
Add todo for scopes permissions
polyaxon/scopes/permissions/scopes.py
polyaxon/scopes/permissions/scopes.py
from scopes.authentication.ephemeral import is_ephemeral_user
from scopes.authentication.internal import is_internal_user
from scopes.permissions.base import PolyaxonPermission


class ScopesPermission(PolyaxonPermission):
    """
    Scopes based Permissions, depends on the authentication backend.
    """
    ENTITY = None
    SCOPE_MAPPING = None

    @staticmethod
    def _check_internal_or_ephemeral(request):
        return any([is_ephemeral_user(request.user), is_internal_user(request.user)])

    def has_permission(self, request, view):
        if not request.auth:
            if not request.user.is_authenticated:
                return False
            # Session users are granted total access
            return True

        # TODO Add internal/ephemeral here
        # (if that type of auth is allowed, then we should not check he scope)

        if request.user.is_authenticated and request.user.is_superuser:
            return True

        allowed_scopes = set(self.SCOPE_MAPPING.get(request.method, []))
        if not allowed_scopes:
            return True

        current_scopes = request.auth.scopes
        return any(s in allowed_scopes for s in current_scopes)
from scopes.authentication.ephemeral import is_ephemeral_user
from scopes.authentication.internal import is_internal_user
from scopes.permissions.base import PolyaxonPermission


class ScopesPermission(PolyaxonPermission):
    """
    Scopes based Permissions, depends on the authentication backend.
    """
    ENTITY = None
    SCOPE_MAPPING = None

    @staticmethod
    def _check_internal_or_ephemeral(request):
        return any([is_ephemeral_user(request.user), is_internal_user(request.user)])

    def has_permission(self, request, view):
        if not request.auth:
            if not request.user.is_authenticated:
                return False
            # Session users are granted total access
            return True

        if request.user.is_authenticated and request.user.is_superuser:
            return True

        allowed_scopes = set(self.SCOPE_MAPPING.get(request.method, []))
        if not allowed_scopes:
            return True

        current_scopes = request.auth.scopes
        return any(s in allowed_scopes for s in current_scopes)
Python
0
ebacfc3ffe1cd1c9c58908c1f9dd78fe9eca9acd
fix for lambton not needed
ca_on_lambton/people.py
ca_on_lambton/people.py
from pupa.scrape import Scraper

from utils import lxmlize, CanadianLegislator as Legislator

import re

COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx'


class LambtonPersonScraper(Scraper):

    def get_people(self):
        page = lxmlize(COUNCIL_PAGE)

        councillors = page.xpath('//div[@id="WebPartWPQ1"]/table/tbody/tr[1]')
        for councillor in councillors:
            node = councillor.xpath('.//td[1]//strong//strong//strong//strong') or councillor.xpath('.//td[1]//strong')
            text = node[0].text_content()
            name = text.strip().replace('Deputy ', '').replace('Warden ', '').replace('Mayor', '')
            role = text.replace(name, '').strip()
            if not role:
                role = 'Councillor'
            if ',' in name:
                name = name.split(',')[0].strip()
            district = councillor.xpath('.//td[1]//p[contains(text(),",")]/text()')[0].split(',')[1].strip()
            district = re.sub(r'\A(?:City|Municipality|Town|Township|Village) of\b| Township\Z', '', district)
            p = Legislator(name=name, post_id=district, role=role)
            p.add_source(COUNCIL_PAGE)
            p.image = councillor.xpath('.//td[1]//img/@src')[0]

            info = councillor.xpath('.//td[2]')[0].text_content()
            residential_info = re.findall(r'(?<=Residence:)(.*)(?=Municipal Office:)', info, flags=re.DOTALL)[0]
            self.get_contacts(residential_info, 'residence', p)
            municipal_info = re.findall(r'(?<=Municipal Office:)(.*)', info, flags=re.DOTALL)[0]
            self.get_contacts(municipal_info, 'legislature', p)

            yield p

    def get_contacts(self, text, note, councillor):
        address = text.split('Telephone')[0]
        text = text.replace(address, '').split(':')
        for i, contact in enumerate(text):
            if i == 0:
                continue
            contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext')
            if '@' in contact:
                contact = contact.strip()
            else:
                contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-')
            if 'Fax' in contact_type:
                councillor.add_contact('fax', contact, note)
            elif 'Tel' in contact_type:
                councillor.add_contact('voice', contact, note)
            elif 'email' in contact_type:
                councillor.add_contact('email', contact, None)
            else:
                councillor.add_contact(contact_type, contact, note)
from pupa.scrape import Scraper

from utils import lxmlize, CanadianLegislator as Legislator

import re

COUNCIL_PAGE = 'http://www.lambtononline.ca/home/government/accessingcountycouncil/countycouncillors/Pages/default.aspx'

SGC = {
    'St. Clair' : '3538003',
    'Dawn-Euphemia' : '3538007',
    'Brooke-Alvinston' : '3538015',
    'Enniskillen' : '3538016',
    'Oil Springs' : '3538018',
    'Petrolia' : '3538019',
    'Sarnia' : '3538030',
    'Point Edward' : '3538031',
    'Plympton-Wyoming' : '3538035',
    'Lambton Shores' : '3538040',
    'Warwick' : '3538043',
}


class LambtonPersonScraper(Scraper):

    def get_people(self):
        page = lxmlize(COUNCIL_PAGE)

        councillors = page.xpath('//div[@id="WebPartWPQ1"]/table/tbody/tr[1]')
        for councillor in councillors:
            node = councillor.xpath('.//td[1]//strong//strong//strong//strong') or councillor.xpath('.//td[1]//strong')
            text = node[0].text_content()
            name = text.strip().replace('Deputy ', '').replace('Warden ', '').replace('Mayor', '')
            role = text.replace(name, '').strip()
            if not role:
                role = 'Councillor'
            if ',' in name:
                name = name.split(',')[0].strip()
            district = councillor.xpath('.//td[1]//p[contains(text(),",")]/text()')[0].split(',')[1].strip()
            district = re.sub(r'\A(?:City|Municipality|Town|Township|Village) of\b| Township\Z', '', district)
            p = Legislator(name=name, post_id=district, role=role)
            p.add_source(COUNCIL_PAGE)
            p.image = councillor.xpath('.//td[1]//img/@src')[0]

            info = councillor.xpath('.//td[2]')[0].text_content()
            residential_info = re.findall(r'(?<=Residence:)(.*)(?=Municipal Office:)', info, flags=re.DOTALL)[0]
            self.get_contacts(residential_info, 'residence', p)
            municipal_info = re.findall(r'(?<=Municipal Office:)(.*)', info, flags=re.DOTALL)[0]
            self.get_contacts(municipal_info, 'legislature', p)

            # Needed for Represent integration.
            p.add_extra('sgc', SGC[district.strip()])

            yield p

    def get_contacts(self, text, note, councillor):
        address = text.split('Telephone')[0]
        text = text.replace(address, '').split(':')
        for i, contact in enumerate(text):
            if i == 0:
                continue
            contact_type = next(x.strip() for x in re.findall(r'[A-Za-z ]+', text[i - 1]) if x.strip() and x.strip() != 'ext')
            if '@' in contact:
                contact = contact.strip()
            else:
                contact = re.findall(r'[0-9]{3}[- ][0-9]{3}-[0-9]{4}(?: ext\. [0-9]+)?', contact)[0].replace(' ', '-')
            if 'Fax' in contact_type:
                councillor.add_contact('fax', contact, note)
            elif 'Tel' in contact_type:
                councillor.add_contact('voice', contact, note)
            elif 'email' in contact_type:
                councillor.add_contact('email', contact, None)
            else:
                councillor.add_contact(contact_type, contact, note)
Python
0
c202a3a945453a4955f0acbf369227f8c9cee148
Rename link in init
__init__.py
__init__.py
import os

from .batchflow import *

__path__ = [os.path.join(os.path.dirname(__file__), 'batchflow')]
import os

from .dataset import *

__path__ = [os.path.join(os.path.dirname(__file__), 'dataset')]
Python
0
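The __path__ assignment here is what makes the rename work: it points the package's submodule search at the batchflow directory, so dotted imports resolve inside it. A minimal sketch of the same trick, with hypothetical names:

# wrapper/__init__.py -- hypothetical package that re-exports a vendored one
import os

from .vendored_lib import *  # top-level names

# Submodule imports like `wrapper.sub` now load wrapper/vendored_lib/sub.py
__path__ = [os.path.join(os.path.dirname(__file__), 'vendored_lib')]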
4a4731eda22170a77bb24dd3c7fc8ff4cafecf9d
bump version to 2.7b1
__init__.py
__init__.py
"""distutils The main package for the Python Module Distribution Utilities. Normally used from a setup script as from distutils.core import setup setup (...) """ __revision__ = "$Id$" # Distutils version # # Updated automatically by the Python release process. # #--start constants-- __version__ = "2.7b1" #--end constants--
"""distutils The main package for the Python Module Distribution Utilities. Normally used from a setup script as from distutils.core import setup setup (...) """ __revision__ = "$Id$" # Distutils version # # Updated automatically by the Python release process. # #--start constants-- __version__ = "2.7a4" #--end constants--
Python
0
bc43827ee733af9c37ca3b97b471ec1d2cde294b
Add unsubcribed handler to server.
echidna/server.py
echidna/server.py
import json

from cyclone.web import Application, RequestHandler, HTTPError
from cyclone.websocket import WebSocketHandler

from echidna.cards.memory_store import InMemoryCardStore


class EchidnaServer(Application):
    def __init__(self, root, **settings):
        self.store = InMemoryCardStore()
        handlers = [
            (r"/", root),
            (r"/publish/(?P<channel>.*)/", PublicationHandler,
             dict(store=self.store)),
            (r"/subscribe", SubscriptionHandler,
             dict(store=self.store)),
        ]
        Application.__init__(self, handlers, **settings)


class PublicationHandler(RequestHandler):
    def initialize(self, store):
        self.store = store

    def post(self, channel):
        try:
            channel = self.decode_argument(channel, "channel")
        except:
            raise HTTPError(400, "Invalid value for channel.")
        try:
            card = json.loads(self.request.body)
        except:
            raise HTTPError(400, "Invalid card in request body.")
        self.store.publish(channel, card)
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps({"success": True}))


class SubscriptionHandler(WebSocketHandler):
    def initialize(self, store):
        self.store = store
        self.client = None

    def _set_client(self, client):
        self.client = client

    def connectionMade(self, *args, **kw):
        d = self.store.create_client(self.on_publish)
        return d.addCallback(self._set_client)

    def connectionLost(self, reason):
        if self.client is not None:
            return self.store.remove_client(self.client)

    def messageReceived(self, msg):
        try:
            msg = json.loads(msg)
        except:
            return
        if not isinstance(msg, dict):
            return
        msg_type = msg.get("msg_type", "invalid")
        if not isinstance(msg_type, unicode):
            return
        handler = getattr(self, "handle_" + msg_type, self.handle_invalid)
        handler(msg)

    def on_publish(self, channel_name, card):
        return self.send_card(channel_name, card)

    def send_card(self, channel_name, card):
        msg = {
            "msg_type": "card",
            "channel": channel_name,
            "card": card,
        }
        self.sendMessage(json.dumps(msg))

    def send_error(self, reason, **data):
        msg = {
            "msg_type": "error",
            "reason": reason,
        }
        msg.update(data)
        self.sendMessage(json.dumps(msg))

    def send_cards(self, channel_name, cards):
        for card in cards:
            self.on_publish(channel_name, card)

    def handle_subscribe(self, msg):
        channel_name = msg.get("channel")
        if not isinstance(channel_name, unicode):
            return
        d = self.store.subscribe(channel_name, self.client)
        return d.addCallback(
            lambda cards: self.send_cards(channel_name, cards))

    def handle_unsubscribed(self, msg):
        channel_name = msg.get("channel")
        if not isinstance(channel_name, unicode):
            return
        d = self.store.unsubscribe(channel_name, self.client)
        return d

    def handle_invalid(self, msg):
        self.send_error("invalid message", original_message=msg)
import json

from cyclone.web import Application, RequestHandler, HTTPError
from cyclone.websocket import WebSocketHandler

from echidna.cards.memory_store import InMemoryCardStore


class EchidnaServer(Application):
    def __init__(self, root, **settings):
        self.store = InMemoryCardStore()
        handlers = [
            (r"/", root),
            (r"/publish/(?P<channel>.*)/", PublicationHandler,
             dict(store=self.store)),
            (r"/subscribe", SubscriptionHandler,
             dict(store=self.store)),
        ]
        Application.__init__(self, handlers, **settings)


class PublicationHandler(RequestHandler):
    def initialize(self, store):
        self.store = store

    def post(self, channel):
        try:
            channel = self.decode_argument(channel, "channel")
        except:
            raise HTTPError(400, "Invalid value for channel.")
        try:
            card = json.loads(self.request.body)
        except:
            raise HTTPError(400, "Invalid card in request body.")
        self.store.publish(channel, card)
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps({"success": True}))


class SubscriptionHandler(WebSocketHandler):
    def initialize(self, store):
        self.store = store
        self.client = None

    def _set_client(self, client):
        self.client = client

    def connectionMade(self, *args, **kw):
        d = self.store.create_client(self.on_publish)
        return d.addCallback(self._set_client)

    def connectionLost(self, reason):
        if self.client is not None:
            return self.store.remove_client(self.client)

    def messageReceived(self, msg):
        try:
            msg = json.loads(msg)
        except:
            return
        if not isinstance(msg, dict):
            return
        msg_type = msg.get("msg_type", "invalid")
        if not isinstance(msg_type, unicode):
            return
        handler = getattr(self, "handle_" + msg_type, self.handle_invalid)
        handler(msg)

    def on_publish(self, channel_name, card):
        return self.send_card(channel_name, card)

    def send_card(self, channel_name, card):
        msg = {
            "msg_type": "card",
            "channel": channel_name,
            "card": card,
        }
        self.sendMessage(json.dumps(msg))

    def send_error(self, reason, **data):
        msg = {
            "msg_type": "error",
            "reason": reason,
        }
        msg.update(data)
        self.sendMessage(json.dumps(msg))

    def send_cards(self, channel_name, cards):
        for card in cards:
            self.on_publish(channel_name, card)

    def handle_subscribe(self, msg):
        channel_name = msg.get("channel")
        if not isinstance(channel_name, unicode):
            return
        d = self.store.subscribe(channel_name, self.client)
        return d.addCallback(
            lambda cards: self.send_cards(channel_name, cards))

    def handle_invalid(self, msg):
        self.send_error("invalid message", original_message=msg)
Python
0
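Because messageReceived() dispatches on "handle_" + msg_type, the new handler is reached by any frame whose msg_type is the literal string "unsubscribed". A sketch of the client-side frame (channel name illustrative):

import json

# Routed to SubscriptionHandler.handle_unsubscribed() by the dispatcher
frame = json.dumps({"msg_type": "unsubscribed", "channel": "news"})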
86eb16da4a6c3579eb514fa5ca73def7be8afd84
Add noqa codestyle
geotrek/api/v2/views/__init__.py
geotrek/api/v2/views/__init__.py
from rest_framework import response, permissions
from rest_framework.views import APIView

from django.conf import settings
from django.contrib.gis.geos import Polygon

from .authent import StructureViewSet  # noqa
from .common import TargetPortalViewSet, ThemeViewSet, SourceViewSet, ReservationSystemViewSet, LabelViewSet, OrganismViewSet  # noqa

if 'geotrek.core' in settings.INSTALLED_APPS:
    from .core import PathViewSet  # noqa

if 'geotrek.feedback' in settings.INSTALLED_APPS:
    from .feedback import ReportStatusViewSet, ReportActivityViewSet, ReportCategoryViewSet, ReportProblemMagnitudeViewSet  # noqa

if 'geotrek.trekking' in settings.INSTALLED_APPS:
    from .trekking import (TrekViewSet, TourViewSet, POIViewSet, POITypeViewSet, AccessibilityViewSet, RouteViewSet,  # noqa
                           DifficultyViewSet, NetworkViewSet, PracticeViewSet,  # noqa
                           WebLinkCategoryViewSet, ServiceTypeViewSet, ServiceViewSet, TrekRatingScaleViewSet, TrekRatingViewSet)  # noqa

if 'geotrek.sensitivity' in settings.INSTALLED_APPS:
    from .sensitivity import SensitiveAreaViewSet  # noqa
    from .sensitivity import SportPracticeViewSet  # noqa
    from .sensitivity import SpeciesViewSet  # noqa

if 'geotrek.tourism' in settings.INSTALLED_APPS:
    from .tourism import TouristicContentViewSet, TouristicEventViewSet, TouristicEventTypeViewSet, InformationDeskViewSet, TouristicContentCategoryViewSet  # noqa

if 'geotrek.zoning' in settings.INSTALLED_APPS:
    from .zoning import CityViewSet, DistrictViewSet  # noqa

if 'geotrek.outdoor' in settings.INSTALLED_APPS:
    from .outdoor import (SiteViewSet, OutdoorPracticeViewSet, SiteTypeViewSet, CourseTypeViewSet,  # noqa
                          OutdoorRatingScaleViewSet, OutdoorRatingViewSet, CourseViewSet, SectorViewSet)  # noqa

if 'geotrek.flatpages' in settings.INSTALLED_APPS:
    from .flatpages import FlatPageViewSet  # noqa

if 'geotrek.infrastructure' in settings.INSTALLED_APPS:
    from .infrastructure import InfrastructureTypeViewSet, InfrastructureViewSet, InfrastructureUsageDifficultyLevelViewSet, InfrastructureConditionViewSet, InfrastructureMaintenanceDifficultyLevelViewSet  # noqa

if 'geotrek.signage' in settings.INSTALLED_APPS:
    from .signage import SignageViewSet, SignageTypeViewSet, SealingViewSet, ColorViewSet, DirectionViewSet, BladeTypeViewSet  # noqa

if 'drf_yasg' in settings.INSTALLED_APPS:
    from .swagger import schema_view  # noqa


class ConfigView(APIView):
    """
    Configuration endpoint that gives the BBox used in the Geotrek configuration
    """
    permission_classes = [permissions.AllowAny, ]

    def get(self, request, *args, **kwargs):
        bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
        bbox.srid = settings.SRID
        bbox.transform(settings.API_SRID)

        return response.Response({
            'bbox': bbox.extent
        })
from rest_framework import response, permissions
from rest_framework.views import APIView

from django.conf import settings
from django.contrib.gis.geos import Polygon

from .authent import StructureViewSet  # noqa
from .common import TargetPortalViewSet, ThemeViewSet, SourceViewSet, ReservationSystemViewSet, LabelViewSet, OrganismViewSet  # noqa

if 'geotrek.core' in settings.INSTALLED_APPS:
    from .core import PathViewSet  # noqa

if 'geotrek.feedback' in settings.INSTALLED_APPS:
    from .feedback import ReportStatusViewSet, ReportActivityViewSet, ReportCategoryViewSet, ReportProblemMagnitudeViewSet  # noqa

if 'geotrek.trekking' in settings.INSTALLED_APPS:
    from .trekking import (TrekViewSet, TourViewSet, POIViewSet, POITypeViewSet, AccessibilityViewSet, RouteViewSet,
                           DifficultyViewSet, NetworkViewSet, PracticeViewSet,
                           WebLinkCategoryViewSet, ServiceTypeViewSet, ServiceViewSet, TrekRatingScaleViewSet, TrekRatingViewSet)  # noqa

if 'geotrek.sensitivity' in settings.INSTALLED_APPS:
    from .sensitivity import SensitiveAreaViewSet  # noqa
    from .sensitivity import SportPracticeViewSet  # noqa
    from .sensitivity import SpeciesViewSet  # noqa

if 'geotrek.tourism' in settings.INSTALLED_APPS:
    from .tourism import TouristicContentViewSet, TouristicEventViewSet, TouristicEventTypeViewSet, InformationDeskViewSet, TouristicContentCategoryViewSet  # noqa

if 'geotrek.zoning' in settings.INSTALLED_APPS:
    from .zoning import CityViewSet, DistrictViewSet  # noqa

if 'geotrek.outdoor' in settings.INSTALLED_APPS:
    from .outdoor import (SiteViewSet, OutdoorPracticeViewSet, SiteTypeViewSet, CourseTypeViewSet,
                          OutdoorRatingScaleViewSet, OutdoorRatingViewSet, CourseViewSet, SectorViewSet)  # noqa

if 'geotrek.flatpages' in settings.INSTALLED_APPS:
    from .flatpages import FlatPageViewSet  # noqa

if 'geotrek.infrastructure' in settings.INSTALLED_APPS:
    from .infrastructure import InfrastructureTypeViewSet, InfrastructureViewSet, InfrastructureUsageDifficultyLevelViewSet, InfrastructureConditionViewSet, InfrastructureMaintenanceDifficultyLevelViewSet  # noqa

if 'geotrek.signage' in settings.INSTALLED_APPS:
    from .signage import SignageViewSet, SignageTypeViewSet, SealingViewSet, ColorViewSet, DirectionViewSet, BladeTypeViewSet  # noqa

if 'drf_yasg' in settings.INSTALLED_APPS:
    from .swagger import schema_view  # noqa


class ConfigView(APIView):
    """
    Configuration endpoint that gives the BBox used in the Geotrek configuration
    """
    permission_classes = [permissions.AllowAny, ]

    def get(self, request, *args, **kwargs):
        bbox = Polygon.from_bbox(settings.SPATIAL_EXTENT)
        bbox.srid = settings.SRID
        bbox.transform(settings.API_SRID)

        return response.Response({
            'bbox': bbox.extent
        })
Python
0
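For readers unfamiliar with the marker: # noqa tells flake8 to skip lint checks for that physical line only, which is why this commit has to repeat it on every continuation line of the parenthesised imports rather than once per statement. An illustrative sketch (module names hypothetical):

from some.module import (FirstViewSet, SecondViewSet,  # noqa
                         ThirdViewSet)  # noqa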
dded8beb4a075dfc44938d5355727cc4058ba80b
Fix typo
athenet/data_loader/data_loader_buffer.py
athenet/data_loader/data_loader_buffer.py
"""Buffer for storing large network data.""" import numpy as np import theano class Buffer(object): """Buffer storing data from contiguous subsequence of minibatches. Content of a buffer is a 4-dimensional floating-point tensor. """ def __init__(self, data_loader=None): """Create data Buffer. :data_loader: Instance of DataLoader that will be using Buffer. """ self.begin = -1 self.end = 0 self.offset = theano.shared(0) self.parent = data_loader # Create a 4-dimensinal tensor shared variable for data. Exact size of # the tensor is determined when data is set, and can change over time. self._data = theano.shared( np.zeros((1, 1, 1, 1), dtype=theano.config.floatX), borrow=True) @property def data(self): """Shared variable representing data stored in a buffer.""" return self._data def __getitem__(self, key): """Return minibatches of given indices. Return data is taken from data array, however key represents minibatch index, not direct index in data array. Effectively, buffer can be used as if it contained all of the minibatches data. Parent must be set before using this method, as minibatch size is needed to determine shift that has to be used in data array. :key: Symbolic index or slice representing indices of minibatches to return. :return: Minibatches data. """ shift = self.offset * self.parent.batch_size if isinstance(key, slice): start, stop, step = key.start, key.stop, key.step return self._data[start-shift:stop-shift:step] else: return self._data[key-shift] def set(self, data, batch_index=None, n_of_batches=None): """Set buffer data. :data: Data to be stored in a buffer. :batch_index: Index of first minibatch that is contained in given data. :n_of_batches: Number of minibatches that are contained in given data. """ if batch_index: self.begin = batch_index self.offset.set_value(batch_index) if n_of_batches: self.end = batch_index + n_of_batches self._data.set_value( np.asarray(np.concatenate(data, axis=0), dtype=theano.config.floatX), borrow=True) def contains(self, batch_index): """Check if minibatch is contained in a buffer. :batch_index: Index of a minibatch. :return: True, if minibatch of a given index is contained in a buffer. False otherwise. """ return batch_index >= self.begin and batch_index < self.end
"""Buffer for storing large network data.""" import numpy as np import theano class Buffer(object): """Buffer storing data from contiguous subsequence of minibatches. Content of a buffer is a 4-dimensional floating-point tensor. """ def __init__(self, data_loader=None): """Create data Buffer. :data_loader: Instance of DataLoader that will be using Buffer. """ self.begin = -1 self.end = 0 self.offset = theano.shared(0) self.parent = data_loader # Create a 4-dimensinal tensor shared variable for data. Exact size of # the tensor is determined when data is set, and can change over time. self._data = theano.shared( np.zeros((1, 1, 1, 1), dtype=theano.config.floatX), borrow=True) @property def data(self): """Shared variable representing data stored in a buffer.""" return self._data def __getitem__(self, key): """Return minibatches of given indices. Return data is taken from data array, however key represents minibatch index, not direct index in data array. Effectively, buffer can be used as if it contained all of the minibatches data. Parent must be set before using this method, as minibatch size is needed to determine shift that has to be uses in data array. :key: Symbolic index or slice representing indices of minibatches to return. :return: Minibatches data. """ shift = self.offset * self.parent.batch_size if isinstance(key, slice): start, stop, step = key.start, key.stop, key.step return self._data[start-shift:stop-shift:step] else: return self._data[key-shift] def set(self, data, batch_index=None, n_of_batches=None): """Set buffer data. :data: Data to be stored in a buffer. :batch_index: Index of first minibatch that is contained in given data. :n_of_batches: Number of minibatches that are contained in given data. """ if batch_index: self.begin = batch_index self.offset.set_value(batch_index) if n_of_batches: self.end = batch_index + n_of_batches self._data.set_value( np.asarray(np.concatenate(data, axis=0), dtype=theano.config.floatX), borrow=True) def contains(self, batch_index): """Check if minibatch is contained in a buffer. :batch_index: Index of a minibatch. :return: True, if minibatch of a given index is contained in a buffer. False otherwise. """ return batch_index >= self.begin and batch_index < self.end
Python
0.999999
a962f1e0aced277e673eddc6b70e316bba482f24
fix typo
api/mail.py
api/mail.py
from flask import Flask, render_template
from api import app
from api.models import User, Invites, Reset
from flask_mail import Mail
from flask_mail import Message

app.config.update(
    MAIL_SERVER = 'smtp.yandex.com',
    MAIL_PORT = 465,
    MAIL_USE_SSL = True ,
    MAIL_USERNAME = 'cross-apps@yandex.com',
    MAIL_PASSWORD = 'innovativeproject',
)

mail = Mail(app)


def send_email(subject, sender, recipients, html_body):
    """
    Sends email of given subject, sender, recipents (array) and html template.
    """
    msg = Message(subject=subject, sender=sender, recipients=recipients)
    msg.html = html_body
    mail.send(msg)


def send_email_register(sender,recip):
    """
    User invitation email.
    """
    email = recip[0]
    username = email.split('@')[0]
    admin = sender.split('@')[0]
    new = Invites.query.filter_by(email = email).first()
    url = 'https://cross-app-links.herokuapp.com/api/auth/setpassword?token=' + str(new.token)
    subject = "Cross-apps registration"
    headerText = "You've received an invitation!"
    freeText = "Administrator has invited you to join Cross-apps shortcuts!"
    userTextBold = "You can complete your registartion by clicking the button or entering the link. \n Set up your unique password and make yourself home!"
    userText = ""
    send_email(subject,
               'cross-apps@yandex.com',
               recip,
               render_template("email_template.html",
                               user=username,
                               sender=admin,
                               url=url,
                               subject=subject,
                               buttonText="Register",
                               headerText=headerText,
                               freeText=freeText,
                               userTextBold=userTextBold,
                               userText=userText))


def send_email_reset(email):
    """
    User password reset email.
    """
    recipent = email[0]
    username = recipent.split('@')[0]
    new = Reset.query.filter_by(email = recipent).first()
    url = 'https://cross-app-links.herokuapp.com/api/auth/setnewpassword?token=' + str(new.token)
    subject = "Cross-apps password reset"
    headerText = "Looks like you want to reset your password!"
    freeText = "Here we send you instructions to set up a new password for your account!"
    userTextBold = "Please proceed by clicking the button. \n You will be displayed a page that will allow you to set a new password."
    userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging."
    send_email(subject,
               'cross-apps@yandex.com',
               email,
               render_template("email_template.html",
                               user=username,
                               sender="system",
                               url=url,
                               subject=subject,
                               buttonText="RESET",
                               headerText=headerText,
                               freeText=freeText,
                               userTextBold=userTextBold,
                               userText=userText))
from flask import Flask, render_template
from api import app
from api.models import User, Invites, Reset
from flask_mail import Mail
from flask_mail import Message

app.config.update(
    MAIL_SERVER = 'smtp.yandex.com',
    MAIL_PORT = 465,
    MAIL_USE_SSL = True ,
    MAIL_USERNAME = 'cross-apps@yandex.com',
    MAIL_PASSWORD = 'innovativeproject',
)

mail = Mail(app)


def send_email(subject, sender, recipients, html_body):
    """
    Sends email of given subject, sender, recipents (array) and html template.
    """
    msg = Message(subject=subject, sender=sender, recipients=recipients)
    msg.html = html_body
    mail.send(msg)


def send_email_register(sender,recip):
    """
    User invitation email.
    """
    email = recip[0]
    username = email.split('@')[0]
    admin = sender.split('@')[0]
    new = Invites.query.filter_by(email = email).first()
    url = 'https://cross-app-links.herokuapp.com/api/auth/setpassword?token=' + str(new.token)
    subject = "Cross-apps registration"
    headerText = "You've received an invitation!"
    freeText = "Administrator has invited you to join Cross-apps shortcuts!"
    userTextBold = "You can complete your registartion by clicking the button or entering the link. \n Set up your unique password and make yourself home!"
    userText = ""
    send_email(subject,
               'cross-apps@yandex.com',
               recip,
               render_template("email_reset_template.html",
                               user=username,
                               sender=admin,
                               url=url,
                               subject=subject,
                               buttonText="Register",
                               headerText=headerText,
                               freeText=freeText,
                               userTextBold=userTextBold,
                               userText=userText))


def send_email_reset(email):
    """
    User password reset email.
    """
    recipent = email[0]
    username = recipent.split('@')[0]
    new = Reset.query.filter_by(email = recipent).first()
    url = 'https://cross-app-links.herokuapp.com/api/auth/setnewpassword?token=' + str(new.token)
    subject = "Cross-apps password reset"
    headerText = "Looks like you want to reset your password!"
    freeText = "Here we send you instructions to set up a new password for your account!"
    userTextBold = "Please proceed by clicking the button. \n You will be displayed a page that will allow you to set a new password."
    userText = "If you forget your password again, please consider drinking green tea. Green tea contains polyphenols, powerful antioxidants that protect against free radicals that can damage brain cells. Among many other benefits, regular consumption of green tea may enhance memory and mental alertness and slow brain aging."
    send_email(subject,
               'cross-apps@yandex.com',
               email,
               render_template("email_template.html",
                               user=username,
                               sender="system",
                               url=url,
                               subject=subject,
                               buttonText="RESET",
                               headerText=headerText,
                               freeText=freeText,
                               userTextBold=userTextBold,
                               userText=userText))
Python
0.998939
f9a1da6e60bfbd9c9e5be769f1223d628cec6481
set the module version
base_external_referentials/__openerp__.py
base_external_referentials/__openerp__.py
# -*- encoding: utf-8 -*-
##############################################################################
#
#   OpenERP, Open Source Management Solution
#   Copyright (C) 2009 Akretion (<http://www.akretion.com>). All Rights Reserved
#   authors: Raphaël Valyi, Sharoon Thomas
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{
    'name': 'Base External Referentials',
    'version': '6.1.0',
    'category': 'Generic Modules/Base',
    'description': """
Definition : a referential is an external system that will interacts with OpenERP
Goal : store external system connection details and objects fields mapping

This module provide an abstract common minimal base to add any additional external id columns
to some OpenObject table, pointing to some external referential.
A referential is abstract and minimal at this stage, it only has:
* a name
* a location (possibly webservice URL, database connection URL...); the connection method will tell it...
* referential credentials (user name + password)
* placeholders for custom in and out mapping for OpenERP object fields.

OpenERP already has limited supported to external ids using the ir_model_data and the id
fields in the loaded data such as XML or CSV. We think that's OK to store all referential ids
into the same ir_model_data table: yes it makes it large, but synchronisation operations involve
a network bottleneck anyway, so it's largely OK and negligible to have a large table here.
The existing ir_model_data feature of OpenERP is mostly thought as an mono-external referential
(even if the module key of ir_model_data plays some referential scoping role). Here we just
push the concept further to assume multiple external ids for OpenERP entities and add the
possibility to customize their field mapping directly in OpenERP to accomodate the external systems.
""",
    'author': 'Raphaël Valyi (Akretion.com), Sharoon Thomas (Openlabs.co.in)',
    'website': 'http://www.akretion.com, http://openlabs.co.in/',
    'depends': ['base','base_pop_up', 'base_file_protocole', 'email_template'],
    'init_xml': [],
    'update_xml': [
        'external_referentials_view.xml',
        'report_view.xml',
        'external_referentials_menu.xml',
        'security/ir.model.access.csv',
        'group_fields_view.xml',
        'security/base_external_referentials_security.xml',
        'report_mail_template.xml',
    ],
    'demo_xml': [],
    'installable': True,
    'certificate': '',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- encoding: utf-8 -*-
##############################################################################
#
#   OpenERP, Open Source Management Solution
#   Copyright (C) 2009 Akretion (<http://www.akretion.com>). All Rights Reserved
#   authors: Raphaël Valyi, Sharoon Thomas
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{
    'name': 'Base External Referentials',
    'version': '1.0',
    'category': 'Generic Modules/Base',
    'description': """
Definition : a referential is an external system that will interacts with OpenERP
Goal : store external system connection details and objects fields mapping

This module provide an abstract common minimal base to add any additional external id columns
to some OpenObject table, pointing to some external referential.
A referential is abstract and minimal at this stage, it only has:
* a name
* a location (possibly webservice URL, database connection URL...); the connection method will tell it...
* referential credentials (user name + password)
* placeholders for custom in and out mapping for OpenERP object fields.

OpenERP already has limited supported to external ids using the ir_model_data and the id
fields in the loaded data such as XML or CSV. We think that's OK to store all referential ids
into the same ir_model_data table: yes it makes it large, but synchronisation operations involve
a network bottleneck anyway, so it's largely OK and negligible to have a large table here.
The existing ir_model_data feature of OpenERP is mostly thought as an mono-external referential
(even if the module key of ir_model_data plays some referential scoping role). Here we just
push the concept further to assume multiple external ids for OpenERP entities and add the
possibility to customize their field mapping directly in OpenERP to accomodate the external systems.
""",
    'author': 'Raphaël Valyi (Akretion.com), Sharoon Thomas (Openlabs.co.in)',
    'website': 'http://www.akretion.com, http://openlabs.co.in/',
    'depends': ['base','base_pop_up', 'base_file_protocole', 'email_template'],
    'init_xml': [],
    'update_xml': [
        'external_referentials_view.xml',
        'report_view.xml',
        'external_referentials_menu.xml',
        'security/ir.model.access.csv',
        'group_fields_view.xml',
        'security/base_external_referentials_security.xml',
        'report_mail_template.xml',
    ],
    'demo_xml': [],
    'installable': True,
    'certificate': '',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
6eeb2b4f79c2f735552cf7c061b48425d3299e51
Use argparse.
validate_equajson.py
validate_equajson.py
#! /usr/bin/env python3

import json
import jsonschema
import sys
import os
import argparse

def main(equajson_path, schema_path):
    global filepath
    filepath = equajson_path
    with open(schema_path) as schema_file:
        try:
            equajson_schema = json.load(schema_file)
        except:
            sys.stderr.write("Invalid JSON in schema: `"+schema_file.name+"'"+'\n')
            raise
    with open(equajson_path) as json_file:
        try:
            equajson = json.load(json_file)
        except:
            sys.stderr.write("Invalid JSON in file: `"+json_file.name+"'"+'\n')
            raise
        try:
            jsonschema.validate(equajson, equajson_schema)
        except jsonschema.exceptions.ValidationError:
            sys.stderr.write(json_file.name+'\n')
            raise
    basename_no_extension = os.path.splitext(os.path.basename(json_file.name))[0]

# It's easier to make this a global variable
# than to thread it through every function.
filepath = None

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='validate equajson files')
    parser.add_argument(
        '-s',
        '--schema',
        help='path to schema file',
        required=True
    )
    parser.add_argument(
        'json_file',
        help='path to json file to validate'
    )
    args = parser.parse_args()
    main(args.json_file, args.schema)

#! /usr/bin/env python3

import json
import jsonschema
import sys
import os

def main(equajson_path, schema_path):
    global filepath
    filepath = equajson_path
    with open(schema_path) as schema_file:
        try:
            equajson_schema = json.load(schema_file)
        except:
            sys.stderr.write("Invalid JSON in schema: `"+schema_file.name+"'"+'\n')
            raise
    with open(equajson_path) as json_file:
        try:
            equajson = json.load(json_file)
        except:
            sys.stderr.write("Invalid JSON in file: `"+json_file.name+"'"+'\n')
            raise
        try:
            jsonschema.validate(equajson, equajson_schema)
        except jsonschema.exceptions.ValidationError:
            sys.stderr.write(json_file.name+'\n')
            raise
    basename_no_extension = os.path.splitext(os.path.basename(json_file.name))[0]

# It's easier to make this a global variable
# than to thread it through every function.
filepath = None

if __name__ == '__main__':
    num_args = len(sys.argv) - 1
    if num_args != 2:
        sys.stderr.write("Usage: python "+sys.argv[0]+" equajson.json schema.json"+'\n')
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
Python
0.000001
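The rewritten entry point above builds its command-line interface with argparse instead of hand-rolled sys.argv checks. A minimal, self-contained sketch of what the new parser accepts (the file names are made up for illustration); parsing an explicit argv list mirrors running `python3 validate_equajson.py -s schema.json equation.json`:

import argparse

parser = argparse.ArgumentParser(description='validate equajson files')
parser.add_argument('-s', '--schema', help='path to schema file', required=True)
parser.add_argument('json_file', help='path to json file to validate')

# Parsing an explicit list stands in for the real command line.
args = parser.parse_args(['-s', 'schema.json', 'equation.json'])
assert args.schema == 'schema.json' and args.json_file == 'equation.json'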
5ecd20d86a0fe2586cbac4daadd34bb13443f94d
set central prototype executable
central/CentralProto.py
central/CentralProto.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import time

from app.nrf24 import NRF24
from app.cipher import XTEA
from app.message import MessageType

# RF Communication constants
NETWORK = 0xC05A
SERVER_ID = 0x01
# Hardware constants
CE_PIN = 25
# Timing constants
PERIOD_REFRESH_KEY_SECS = 120.0

CODE = '123456'

#TODO refactor all conversion methods into a common place
def byte(val):
    return val & 0xFF

def to_int(val):
    return (byte(val[0]) << 8) + byte(val[1])

def to_long(val):
    return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3])

def from_long(val):
    return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte (val)]

def convert_key(key):
    key2 = []
    for i in key:
        key2 += from_long(i)
    return key2

class Device:
    def __init__(self):
        self.cipher = XTEA()
        self.latest_ping = time.time()
        self.latest_voltage_level = None
        self.next_key_time = 0

# List of all devices and their encoding keys
keys = {}

# Current alarm status
locked = True

if __name__ == '__main__':
    print "Alarm System Central Prototype..."
    nrf = NRF24(NETWORK, SERVER_ID)
    print "NRF24 instance created..."
    nrf.begin(0, 0, CE_PIN)
    print "NRF24 instance started..."
    while True:
        # Wait forever for remote modules calls
        #FIXME we should limit the timeout in order to frequently check that all known devices
        # are pinging as expected...
        payload = nrf.recv()
        now = time.clock()
        # Have we received something?
        if payload:
            # Yes, find the originating device and port (message type)
            device_id = payload.device
            port = payload.port
            content = payload.content

            # Add the device if first time
            device = keys.get(device_id)
            if not device:
                device = Device()
                keys[device_id] = device

            print "Source %02X, port %02X" % (device, port)
            # Manage received message based on its type (port)
            if port == MessageType.PING_SERVER:
                device.latest_ping = now
                payload = [locked]
                # Check if need to generate and send new cipher key
                if now >= device.next_key_time:
                    key = XTEA.generate_key()
                    device.cipher.set_key(key)
                    device.next_key_time = now + PERIOD_REFRESH_KEY_SECS
                    payload = [locked]
                    payload += convert_key(key)
                nrf.send(device_id, port, payload)
            elif port == MessageType.VOLTAGE_LEVEL:
                device.latest_voltage_level = to_int(content)
                print "Source %02X, voltage = %d mV" % (device, device.latest_voltage_level)
            elif port in [MessageType.LOCK_CODE, MessageType.UNLOCK_CODE]:
                #TODO decipher
                code = device.cipher.decipher([to_long(content[0:4]), to_long(content[4:8])])
                code = from_long(code[0]) + from_long(code[1])
                print "Source %02X, code = %s" % (device, code)
                #TODO convert to string and compare to CODE
                # Send current lock status
                nrf.send(device_id, port, [locked])
            else:
                print "Source %02X, unknown port %02X!" % (device, port)

#!/usr/bin/python
# -*- coding: utf-8 -*-

import time

from app.nrf24 import NRF24
from app.cipher import XTEA
from app.message import MessageType

# RF Communication constants
NETWORK = 0xC05A
SERVER_ID = 0x01
# Hardware constants
CE_PIN = 25
# Timing constants
PERIOD_REFRESH_KEY_SECS = 120.0

CODE = '123456'

#TODO refactor all conversion methods into a common place
def byte(val):
    return val & 0xFF

def to_int(val):
    return (byte(val[0]) << 8) + byte(val[1])

def to_long(val):
    return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3])

def from_long(val):
    return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte (val)]

def convert_key(key):
    key2 = []
    for i in key:
        key2 += from_long(i)
    return key2

class Device:
    def __init__(self):
        self.cipher = XTEA()
        self.latest_ping = time.time()
        self.latest_voltage_level = None
        self.next_key_time = 0

# List of all devices and their encoding keys
keys = {}

# Current alarm status
locked = True

if __name__ == '__main__':
    print "Alarm System Central Prototype..."
    nrf = NRF24(NETWORK, SERVER_ID)
    print "NRF24 instance created..."
    nrf.begin(0, 0, CE_PIN)
    print "NRF24 instance started..."
    while True:
        # Wait forever for remote modules calls
        #FIXME we should limit the timeout in order to frequently check that all known devices
        # are pinging as expected...
        payload = nrf.recv()
        now = time.clock()
        # Have we received something?
        if payload:
            # Yes, find the originating device and port (message type)
            device_id = payload.device
            port = payload.port
            content = payload.content

            # Add the device if first time
            device = keys.get(device_id)
            if not device:
                device = Device()
                keys[device_id] = device

            print "Source %02X, port %02X" % (device, port)
            # Manage received message based on its type (port)
            if port == MessageType.PING_SERVER:
                device.latest_ping = now
                payload = [locked]
                # Check if need to generate and send new cipher key
                if now >= device.next_key_time:
                    key = XTEA.generate_key()
                    device.cipher.set_key(key)
                    device.next_key_time = now + PERIOD_REFRESH_KEY_SECS
                    payload = [locked]
                    payload += convert_key(key)
                nrf.send(device_id, port, payload)
            elif port == MessageType.VOLTAGE_LEVEL:
                device.latest_voltage_level = to_int(content)
                print "Source %02X, voltage = %d mV" % (device, device.latest_voltage_level)
            elif port in [MessageType.LOCK_CODE, MessageType.UNLOCK_CODE]:
                #TODO decipher
                code = device.cipher.decipher([to_long(content[0:4]), to_long(content[4:8])])
                code = from_long(code[0]) + from_long(code[1])
                print "Source %02X, code = %s" % (device, code)
                #TODO convert to string and compare to CODE
                # Send current lock status
                nrf.send(device_id, port, [locked])
            else:
                print "Source %02X, unknown port %02X!" % (device, port)
Python
0.000001
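The conversion helpers in this prototype pack 32-bit values into big-endian byte lists and back. A small self-contained sketch (the constant is arbitrary) showing that from_long and to_long round-trip:

def byte(val):
    return val & 0xFF

def to_long(val):
    return (byte(val[0]) << 24) + (byte(val[1]) << 16) + (byte(val[2]) << 8) + byte(val[3])

def from_long(val):
    return [byte(val >> 24), byte(val >> 16), byte(val >> 8), byte(val)]

# Round trip preserves the full 32-bit value.
assert to_long(from_long(0xC05A0125)) == 0xC05A0125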
e6cb1617e588d6b276fe01c401f2c1b34cf88d5f
fix stuff
api/read.py
api/read.py
import datetime

from django.http import JsonResponse
from dateutil.parser import parse
from django.contrib.auth.decorators import login_required

from api.models import (
    Applicant,
    Client,
    Disabilities,
    EmploymentEducation,
    Enrollment,
    HealthAndDV,
    IncomeBenefits,
    Services
)

def get_applicants(request):
    applicant = {}
    return JsonResponse(applicant)

def search_clients(request):
    '''
    request.POST = query
    '''
    clients = Client.objects.all()
    if 'query' in request.POST:
        q = request.POST['query']
        if q.isdigit():
            clients = clients.filter(uuid=q)
        else:
            clients = clients.filter(last_name__contains=q)
    return JsonResponse([{
        "first_name": c.first_name,
        "middle_name": c.middle_name,
        "last_name": c.last_name,
        "social_security": c.social_security,
        "date_of_birth": datetime.datetime.strftime(c.date_of_birth, '%m/%d/%Y'),
        "ethnicity": 1,
        "gender": 1,
        "veteran": 1,
        "year_entered": c.year_entered,
        "year_exited": c.year_exited,
        "date_created": c.date_created
    } for c in clients], safe=False)

def get_applicants(request):
    app_list = Applicant.objects.all()
    applicant = [{
        "id": c.id,
        "first_name": c.first_name,
        "last_name": c.last_name,
        "why": c.why,
        "phone": c.phone,
        "email": c.emial,
        "address": c.address,
        "birthday": c.birthday,
        "ethnicity": value_maps.ethnicity[c.ethnicity],
        "gender": value_maps.gender[c.gender],
        "veteran": value_maps.veteran[c.veteran],
        "family": c.family,
        "domestic_violence": value_maps.domestic_violence[c.domestic_violence],
        "pregnancy": c.pregnancy,
        "drug": c.drug,
        "urgency": c.urgency,
        "created": c.created,
        "reviewed": c.reviewed,
    } for c in app_list]
    return JsonResponse(applicant, safe=False)

import datetime

from django.http import JsonResponse
from dateutil.parser import parse
from django.contrib.auth.decorators import login_required

from api.models import (
    Applicant,
    Client,
    Disabilities,
    EmploymentEducation,
    Enrollment,
    HealthAndDV,
    IncomeBenefits,
    Services
)

def get_applicants(request):
    applicant = {}
    return JsonResponse(applicant)

def search_clients(request):
    '''
    request.POST = query
    '''
    clients = Client.objects.all()
    if 'query' in request.POST:
        q = request.POST['query']
        if q.isdigit():
            clients = clients.filter(uuid=q)
        else:
            clients = clients.filter(last_name__contains=q)
    return JsonResponse([{
        "first_name": c.first_name,
        "middle_name": c.middle_name,
        "last_name": c.last_name,
        "social_security": c.social_security,
        "date_of_birth": datetime.datetime.strftime(c.date_of_birth, '%m/%d/%Y'),
        "ethnicity": 1,
        "gender": 1,
        "veteran": 1,
        "year_entered": c.year_entered,
        "year_exited": c.year_exited,
        "date_created": c.date_created
    } for c in clients], safe=False)
<<<<<<< Updated upstream

def get_applicants(request):
    app_list = Applicant.objects.all()
    applicant = [{
        "id": c.id,
        "first_name": c.first_name,
        "last_name": c.last_name,
        "why": c.why,
        "phone": c.phone,
        "email": c.emial,
        "address": c.address,
        "birthday": c.birthday,
        "ethnicity": value_maps.ethnicity[c.ethnicity],
        "gender": value_maps.gender[c.gender],
        "veteran": value_maps.veteran[c.veteran],
        "family": c.family,
        "domestic_violence": value_maps.domestic_violence[c.domestic_violence],
        "pregnancy": c.pregnancy,
        "drug": c.drug,
        "urgency": c.urgency,
        "created": c.created,
        "reviewed": c.reviewed,
    } for c in app_list]
    return JsonResponse(applicant, safe=False)
Python
0.000002
ae7b583cab8d38b04ce57571f50221b4a2e429f6
Update base.py
webhook/base.py
webhook/base.py
""" Base webhook implementation """ import json from django.http import HttpResponse from django.views.generic import View from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt class WebhookBase(View): """ Simple Webhook base class to handle the most standard case. """ @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(WebhookBase, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): data = json.loads(request.body.decode('utf-8')) self.process_webhook(data) return HttpResponse(status=200) def process_webhook(self, data=None): """ Unimplemented method """ raise NotImplementedError
""" Base webhook implementation """ import json from django.http import HttpResponse from django.views.generic import View from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt class WebhookBase(View): """ Simple Webhook base class to handle the most standard case. """ @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(WebhookBase, self).dispatch(request, *args, **kwargs) def post(self, request, *args, **kwargs): data = json.loads(request.body.decode('utf-8')) self.process_webhook(data) return HttpResponse(status=200) def process_webhook(self, data): """ Unimplemented method """ raise NotImplementedError
Python
0.000001
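WebhookBase is meant to be subclassed with process_webhook overridden; the commit makes the data argument optional. A hypothetical subclass (the class name and event field are invented for illustration):

from webhook.base import WebhookBase

class PaymentWebhook(WebhookBase):
    def process_webhook(self, data=None):
        # `data` is the JSON-decoded request body supplied by WebhookBase.post()
        event = (data or {}).get('event')
        print('received webhook event:', event)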
46b860e93d8a9e8dda3499b7306e30ebcd0e0174
handle session stopped
webnotes/app.py
webnotes/app.py
import sys, os
import json

sys.path.insert(0, '.')
sys.path.insert(0, 'app')
sys.path.insert(0, 'lib')

from werkzeug.wrappers import Request, Response
from werkzeug.local import LocalManager
from webnotes.middlewares import StaticDataMiddleware
from werkzeug.exceptions import HTTPException
from werkzeug.contrib.profiler import ProfilerMiddleware
from webnotes import get_config

import mimetypes
import webnotes
import webnotes.handler
import webnotes.auth
import webnotes.webutils

local_manager = LocalManager([webnotes.local])

def handle_session_stopped():
    res = Response("""<html>
<body style="background-color: #EEE;">
<h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto">
Updating. We will be back in a few moments...
</h3>
</body>
</html>""")
    res.status_code = 503
    res.content_type = 'text/html'
    return res

@Request.application
def application(request):
    webnotes.local.request = request
    try:
        site = webnotes.utils.get_site_name(request.host)
        webnotes.init(site=site)
        webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
            for k, v in (request.form or request.args).iteritems() })
        webnotes.local._response = Response()
        try:
            webnotes.http_request = webnotes.auth.HTTPRequest()
        except webnotes.AuthenticationError, e:
            pass
        if webnotes.form_dict.cmd:
            webnotes.handler.handle()
        else:
            webnotes.webutils.render(webnotes.request.path[1:])
    except HTTPException, e:
        return e
    except webnotes.SessionStopped, e:
        webnotes.local._response = handle_session_stopped()
    finally:
        if webnotes.conn:
            webnotes.conn.close()
    return webnotes.local._response

application = local_manager.make_middleware(application)

def serve(port=8000, profile=False):
    webnotes.validate_versions()
    global application
    from werkzeug.serving import run_simple

    if profile:
        application = ProfilerMiddleware(application)

    application = StaticDataMiddleware(application, {
        '/': 'public',
    })

    run_simple('0.0.0.0', int(port), application, use_reloader=True,
        use_debugger=True, use_evalex=True)

import sys, os
import json

sys.path.insert(0, '.')
sys.path.insert(0, 'app')
sys.path.insert(0, 'lib')

from werkzeug.wrappers import Request, Response
from werkzeug.local import LocalManager
from webnotes.middlewares import StaticDataMiddleware
from werkzeug.exceptions import HTTPException
from werkzeug.contrib.profiler import ProfilerMiddleware
from webnotes import get_config

import mimetypes
import webnotes
import webnotes.handler
import webnotes.auth
import webnotes.webutils

local_manager = LocalManager([webnotes.local])

@Request.application
def application(request):
    webnotes.local.request = request
    try:
        site = webnotes.utils.get_site_name(request.host)
        webnotes.init(site=site)
        webnotes.local.form_dict = webnotes._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
            for k, v in (request.form or request.args).iteritems() })
        webnotes.local._response = Response()
        try:
            webnotes.http_request = webnotes.auth.HTTPRequest()
        except webnotes.AuthenticationError, e:
            pass
        if webnotes.form_dict.cmd:
            webnotes.handler.handle()
        else:
            webnotes.webutils.render(webnotes.request.path[1:])
    except HTTPException, e:
        return e
    finally:
        if webnotes.conn:
            webnotes.conn.close()
    return webnotes._response

application = local_manager.make_middleware(application)

def serve(port=8000, profile=False):
    webnotes.validate_versions()
    global application
    from werkzeug.serving import run_simple

    if profile:
        application = ProfilerMiddleware(application)

    application = StaticDataMiddleware(application, {
        '/': 'public',
    })

    run_simple('0.0.0.0', int(port), application, use_reloader=True,
        use_debugger=True, use_evalex=True)
Python
0
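The new handle_session_stopped helper builds a 503 maintenance page out of standard Werkzeug Response attributes. The same pattern in isolation, runnable on its own:

from werkzeug.wrappers import Response

res = Response("<h3>Updating. We will be back in a few moments...</h3>")
res.status_code = 503          # "service unavailable" while the site updates
res.content_type = 'text/html'
assert res.status_code == 503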
e38fa3f55b0e60a1d6c7fa0cf194e6f3bd4b899d
add histogram util
corehq/util/datadog/gauges.py
corehq/util/datadog/gauges.py
from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.soft_assert import soft_assert

def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
    """
    helper for easily registering datadog gauges to run periodically

    To update a datadog gauge on a schedule based on the result of a function
    just add to your app's tasks.py:

        my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
                                            run_every=crontab(minute=0))

    """
    _enforce_prefix(name, enforce_prefix)

    datadog_gauge = _DatadogGauge(name, fn, run_every)
    return datadog_gauge.periodic_task()

def datadog_histogram(name, value, enforce_prefix='commcare', tags=None):
    """
    Usage: Used to track the statistical distribution of a set of values over a statsd flush period.
    Actually submits as multiple metrics:
    """
    _datadog_record(statsd.histogram, name, value, enforce_prefix, tags)

def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
    _datadog_record(statsd.gauge, name, value, enforce_prefix, tags)

def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
    _datadog_record(statsd.increment, name, value, enforce_prefix, tags)

def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
    _enforce_prefix(name, enforce_prefix)
    try:
        fn(name, value, tags=tags)
    except Exception:
        datadog_logger.exception('Unable to record Datadog stats')

class _DatadogGauge(object):

    def __init__(self, name, fn, run_every):
        self.name = name
        self.fn = fn
        self.run_every = run_every

    def periodic_task(self):
        @periodic_task('background_queue', run_every=self.run_every,
                       acks_late=True, ignore_result=True)
        @wraps(self.fn)
        def inner(*args, **kwargs):
            statsd.gauge(self.name, self.fn(*args, **kwargs))

        return inner

def _enforce_prefix(name, prefix):
    soft_assert(fail_if_debug=True).call(
        not prefix or name.split('.')[0] == prefix,
        "Did you mean to call your gauge 'commcare.{}'? "
        "If you're sure you want to forgo the prefix, you can "
        "pass enforce_prefix=None".format(name))

from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.soft_assert import soft_assert

def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
    """
    helper for easily registering datadog gauges to run periodically

    To update a datadog gauge on a schedule based on the result of a function
    just add to your app's tasks.py:

        my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
                                            run_every=crontab(minute=0))

    """
    _enforce_prefix(name, enforce_prefix)

    datadog_gauge = _DatadogGauge(name, fn, run_every)
    return datadog_gauge.periodic_task()

def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
    _datadog_record(statsd.gauge, name, value, enforce_prefix, tags)

def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
    _datadog_record(statsd.increment, name, value, enforce_prefix, tags)

def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
    _enforce_prefix(name, enforce_prefix)
    try:
        fn(name, value, tags=tags)
    except Exception:
        datadog_logger.exception('Unable to record Datadog stats')

class _DatadogGauge(object):

    def __init__(self, name, fn, run_every):
        self.name = name
        self.fn = fn
        self.run_every = run_every

    def periodic_task(self):
        @periodic_task('background_queue', run_every=self.run_every,
                       acks_late=True, ignore_result=True)
        @wraps(self.fn)
        def inner(*args, **kwargs):
            statsd.gauge(self.name, self.fn(*args, **kwargs))

        return inner

def _enforce_prefix(name, prefix):
    soft_assert(fail_if_debug=True).call(
        not prefix or name.split('.')[0] == prefix,
        "Did you mean to call your gauge 'commcare.{}'? "
        "If you're sure you want to forgo the prefix, you can "
        "pass enforce_prefix=None".format(name))
Python
0.000786
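The added datadog_histogram records one sample per call into a statsd distribution. A hedged usage sketch (the metric name and tag are invented; per _enforce_prefix, names are expected to start with 'commcare' unless enforce_prefix=None is passed):

from corehq.util.datadog.gauges import datadog_histogram

# One sample in the 'commcare.case.load_time' distribution; tags are optional.
datadog_histogram('commcare.case.load_time', 132, tags=['domain:example'])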
3643f0ce1b7ea7982e8081ae29e726c73471cc4b
update description
vcspull/__about__.py
vcspull/__about__.py
__title__ = 'vcspull'
__package_name__ = 'vcspull'
__description__ = 'synchronize your repos'
__version__ = '1.0.0'
__author__ = 'Tony Narlock'
__email__ = 'tony@git-pull.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2013-2016 Tony Narlock'

__title__ = 'vcspull'
__package_name__ = 'vcspull'
__description__ = 'vcs project manager'
__version__ = '1.0.0'
__author__ = 'Tony Narlock'
__email__ = 'tony@git-pull.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2013-2016 Tony Narlock'
Python
0.000001
42561d709a2ecfee71103dfbb55116cec1128b71
fix redirect after upload
website/apps/home/views/UploadView.py
website/apps/home/views/UploadView.py
#!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

import logging

from django.core.urlresolvers import reverse
from django.db import transaction
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.views.generic.base import TemplateView

from website.apps.home.utils import load_simulation_file

logger = logging.getLogger(__name__)

class UploadView(TemplateView):
    template_name = "../templates/simulation/upload.html"

    @transaction.atomic
    def post(self, request, *args, **kwargs):
        if request.method == 'POST':
            if not request.FILES['output_file']:
                return HttpResponseBadRequest("No 'output_file' is provided")
            else:
                sim_name = self.request.POST.get(u"name", None)
                is_historical = self.request.POST.get("historical")
                load_simulation_file(request.FILES['output_file'], simulation_name=sim_name, is_historical=is_historical)
                # Redirect to appropriate page whether uploading simulation or historical
                if is_historical!='on':
                    return HttpResponseRedirect(reverse('home.display_simulations'))
                else:
                    return HttpResponseRedirect(reverse('home.display_historical'))
        else:
            return HttpResponseRedirect("")

#!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

import logging

from django.core.urlresolvers import reverse
from django.db import transaction
from django.http.response import HttpResponseBadRequest, HttpResponseRedirect
from django.views.generic.base import TemplateView

from website.apps.home.utils import load_simulation_file

logger = logging.getLogger(__name__)

class UploadView(TemplateView):
    template_name = "../templates/simulation/upload.html"

    @transaction.atomic
    def post(self, request, *args, **kwargs):
        if request.method == 'POST':
            if not request.FILES['output_file']:
                return HttpResponseBadRequest("No 'output_file' is provided")
            else:
                sim_name = self.request.POST.get(u"name", None)
                is_historical = self.request.POST.get("historical")
                load_simulation_file(request.FILES['output_file'], simulation_name=sim_name, is_historical=is_historical)
                return HttpResponseRedirect(reverse('home.display_simulations'))
        else:
            return HttpResponseRedirect("")
Python
0
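The redirect fix hinges on HTML checkbox semantics: a checked box posts the literal value "on", while an unchecked box is omitted from the form entirely. A tiny sketch of that assumption (the payload is invented):

# Box unchecked: the "historical" key is absent, so .get() returns None.
post = {"name": "run-42"}
is_historical = post.get("historical")
assert is_historical != 'on'   # falls through to the simulations page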
c9a915692b30458717ead2f83fce77ce295e5ed9
add recipe_folder member (#10527)
conans/pylint_plugin.py
conans/pylint_plugin.py
"""Pylint plugin for ConanFile""" import astroid from astroid import MANAGER def register(linter): """Declare package as plugin This function needs to be declared so astroid treats current file as a plugin. """ pass def transform_conanfile(node): """Transform definition of ConanFile class so dynamic fields are visible to pylint""" str_class = astroid.builtin_lookup("str") info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( "ConanInfo") build_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires") file_copier_class = MANAGER.ast_from_module_name( "conans.client.file_copier").lookup("FileCopier") file_importer_class = MANAGER.ast_from_module_name( "conans.client.importer").lookup("_FileImporter") python_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.python_requires").lookup("PyRequires") dynamic_fields = { "conan_data": str_class, "build_requires": build_requires_class, "info_build": info_class, "info": info_class, "copy": file_copier_class, "copy_deps": file_importer_class, "python_requires": [str_class, python_requires_class], "recipe_folder": str_class, } for f, t in dynamic_fields.items(): node.locals[f] = [t] MANAGER.register_transform( astroid.ClassDef, transform_conanfile, lambda node: node.qname() == "conans.model.conan_file.ConanFile") def _python_requires_member(): return astroid.parse(""" from conans.client.graph.python_requires import ConanPythonRequire python_requires = ConanPythonRequire() """) astroid.register_module_extender(astroid.MANAGER, "conans", _python_requires_member)
"""Pylint plugin for ConanFile""" import astroid from astroid import MANAGER def register(linter): """Declare package as plugin This function needs to be declared so astroid treats current file as a plugin. """ pass def transform_conanfile(node): """Transform definition of ConanFile class so dynamic fields are visible to pylint""" str_class = astroid.builtin_lookup("str") info_class = MANAGER.ast_from_module_name("conans.model.info").lookup( "ConanInfo") build_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires") file_copier_class = MANAGER.ast_from_module_name( "conans.client.file_copier").lookup("FileCopier") file_importer_class = MANAGER.ast_from_module_name( "conans.client.importer").lookup("_FileImporter") python_requires_class = MANAGER.ast_from_module_name( "conans.client.graph.python_requires").lookup("PyRequires") dynamic_fields = { "conan_data": str_class, "build_requires": build_requires_class, "info_build": info_class, "info": info_class, "copy": file_copier_class, "copy_deps": file_importer_class, "python_requires": [str_class, python_requires_class], } for f, t in dynamic_fields.items(): node.locals[f] = [t] MANAGER.register_transform( astroid.ClassDef, transform_conanfile, lambda node: node.qname() == "conans.model.conan_file.ConanFile") def _python_requires_member(): return astroid.parse(""" from conans.client.graph.python_requires import ConanPythonRequire python_requires = ConanPythonRequire() """) astroid.register_module_extender(astroid.MANAGER, "conans", _python_requires_member)
Python
0
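With the transform registered, dynamically injected attributes such as the newly added recipe_folder resolve for pylint instead of triggering E1101 (no-member). A hedged sketch, assuming a Conan 1.x install and that the plugin is loaded via pylint's --load-plugins option (e.g. --load-plugins=conans.pylint_plugin):

from conans import ConanFile

class MyRecipe(ConanFile):  # hypothetical recipe for illustration
    def build(self):
        # Previously flagged as no-member; the transform maps it to str.
        print(self.recipe_folder)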
4b5ae262bab0bc0c83555d39400049f20aaca9cd
Add CONVERSATION_LABEL_MAX_LENGTH constant
chatterbot/constants.py
chatterbot/constants.py
""" ChatterBot constants """ ''' The maximum length of characters that the text of a statement can contain. This should be enforced on a per-model basis by the data model for each storage adapter. ''' STATEMENT_TEXT_MAX_LENGTH = 400 ''' The maximum length of characters that the text label of a conversation can contain. The number 32 was chosen because that is the length of the string representation of a UUID4 with no hyphens. ''' CONVERSATION_LABEL_MAX_LENGTH = 32 # The maximum length of characters that the name of a tag can contain TAG_NAME_MAX_LENGTH = 50 DEFAULT_DJANGO_APP_NAME = 'django_chatterbot'
""" ChatterBot constants """ ''' The maximum length of characters that the text of a statement can contain. This should be enforced on a per-model basis by the data model for each storage adapter. ''' STATEMENT_TEXT_MAX_LENGTH = 400 # The maximum length of characters that the name of a tag can contain TAG_NAME_MAX_LENGTH = 50 DEFAULT_DJANGO_APP_NAME = 'django_chatterbot'
Python
0.999974
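The docstring's claim that a hyphen-free UUID4 string is exactly 32 characters is easy to verify with the standard library:

import uuid

label = uuid.uuid4().hex     # string representation with no hyphens
assert len(label) == 32      # hence CONVERSATION_LABEL_MAX_LENGTH = 32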
7a1e57fa5c6d2c6330a73e8fab95c5ef6fa0ea35
Fix indentation
tomviz/python/SetNegativeVoxelsToZero.py
tomviz/python/SetNegativeVoxelsToZero.py
def transform_scalars(dataset):
    """Set negative voxels to zero"""

    from tomviz import utils
    import numpy as np

    data = utils.get_array(dataset)
    data[data<0] = 0 #set negative voxels to zero

    # set the result as the new scalars.
    utils.set_array(dataset, data)

def transform_scalars(dataset):
    """Set negative voxels to zero"""

    from tomviz import utils
    import numpy as np

    data = utils.get_array(dataset)
    data[data<0] = 0 #set negative voxels to zero

    # set the result as the new scalars.
    utils.set_array(dataset, data)
Python
0.017244
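The in-place mask assignment this operator uses is equivalent to clipping the array at zero. A small self-contained NumPy check (sample values are arbitrary):

import numpy as np

data = np.array([-3.0, 0.5, -0.1, 2.0])
data[data < 0] = 0   # boolean-mask assignment, as in the operator
assert np.array_equal(data, np.clip([-3.0, 0.5, -0.1, 2.0], 0, None))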
e504ef393f9f11d243fed88b2e4acc1566ea912c
Delete unread messages
scripts/read.py
scripts/read.py
import time

import cache
import vkapi
from log import datetime_format

def main(a, args):
    dialogs = a.messages.getDialogs(unread=1)['items']
    messages = {}
    users = []
    chats = []
    for msg in dialogs:
        def cb(req, resp):
            messages[req['peer_id']] = resp['items'][::-1]
        a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb)
        if 'chat_id' in msg['message']:
            chats.append(msg['message']['chat_id'])
        else:
            users.append(msg['message']['user_id'])
    uc = cache.UserCache(a, 'online')
    cc = cache.ConfCache(a)
    uc.load(users)
    cc.load(chats)
    a.sync()
    mids = []
    if dialogs:
        print('-------------------------\n')
    else:
        print('Nothing here')
    for msg in dialogs:
        m = msg['message']
        if 'chat_id' in m:
            print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread']))
        else:
            print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'], ', online' if uc[m['user_id']]['online'] else '', msg['unread']))
        print()
        for i in messages[vkapi.utils.getSender(msg['message'])]:
            print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body']))
            print()
            if 'chat_id' not in m:
                mids.append(i['id'])
        print('-------------------------\n')
    if 't' in args:
        print(flush=True)
        mr = vkapi.MessageReceiver(a)
        while True:
            time.sleep(1)
            for m in mr.getMessages():
                if 'chat_id' in m:
                    print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'], uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name']))
                else:
                    print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id']))
                print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body']))
                print(flush=True)
    elif 'd' in args and mids:
        print('Deleting {} messages'.format(len(mids)))
        a.messages.delete(message_ids=','.join(map(str, mids)))

import time

import cache
import vkapi
from log import datetime_format

def main(a, args):
    dialogs = a.messages.getDialogs(unread=1)['items']
    messages = {}
    users = []
    chats = []
    for msg in dialogs:
        def cb(req, resp):
            messages[req['peer_id']] = resp['items'][::-1]
        a.messages.getHistory.delayed(peer_id=vkapi.utils.getSender(msg['message']), count=min(msg['unread'], 10)).callback(cb)
        if 'chat_id' in msg['message']:
            chats.append(msg['message']['chat_id'])
        else:
            users.append(msg['message']['user_id'])
    uc = cache.UserCache(a, 'online')
    cc = cache.ConfCache(a)
    uc.load(users)
    cc.load(chats)
    a.sync()
    if dialogs:
        print('-------------------------\n')
    else:
        print('Nothing here')
    for msg in dialogs:
        m = msg['message']
        if 'chat_id' in m:
            print('Chat "{}" ({}): {}'.format(cc[m['chat_id']]['title'], m['chat_id'], msg['unread']))
        else:
            print('{} {} ({}){}: {}'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id'], ', online' if uc[m['user_id']]['online'] else '', msg['unread']))
        print()
        for i in messages[vkapi.utils.getSender(msg['message'])]:
            print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(i['date'])), i['body']))
            print()
        print('-------------------------\n')
    if args:
        print(flush=True)
        mr = vkapi.MessageReceiver(a)
        while True:
            time.sleep(1)
            for m in mr.getMessages():
                if 'chat_id' in m:
                    print('Chat "{}" ({}), {} {}:'.format(cc[m['chat_id']]['title'], m['chat_id'], uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name']))
                else:
                    print('{} {} ({}):'.format(uc[m['user_id']]['first_name'], uc[m['user_id']]['last_name'], m['user_id']))
                print('[{}] {}'.format(time.strftime(datetime_format, time.localtime(m['date'])), m['body']))
                print(flush=True)
Python
0.000015
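The delete call added at the end batches the collected message ids into the comma-separated string the API expects. Trivially (the ids are invented):

mids = [101, 102, 107]   # hypothetical message ids collected in the loop
assert ','.join(map(str, mids)) == '101,102,107'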
46e2997cb51e45dc58f5a97cea6642ba64d03188
Fix 9.0 version
purchase_all_shipments/__openerp__.py
purchase_all_shipments/__openerp__.py
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{'name': 'Purchase All Shipments',
 'version': '9.0.1.0.0',
 'author': "Camptocamp,Odoo Community Association (OCA)",
 'category': 'Purchases',
 'license': 'AGPL-3',
 'depends': ['purchase'],
 'data': ['view/purchase_order.xml'],
 }

# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{'name': 'Purchase All Shipments',
 'version': '8.0.1.0.0',
 'author': "Camptocamp,Odoo Community Association (OCA)",
 'category': 'Purchases',
 'license': 'AGPL-3',
 'depends': ['purchase'],
 'data': ['view/purchase_order.xml'],
 }
Python
0
5aca45a68a229f43a25dd97d2c680716c9baabf5
add travis env to sgen
scripts/sgen.py
scripts/sgen.py
#!/usr/bin/python
# Generate original static file to another with new prefix
# ./sgen index.html old_prefix static_index.html new_prefix

import sys
from os import walk, path, environ

# File lists
# The two file lists should be aligned.
root = environ['TRAVIS_BUILD_DIR']
files = []
for (dirpath, dirname, filenames) in walk( root + "/static"):
    for f in filenames:
        if ".html" in f:
            files.append(dirpath + "/" + f)

# prefix of target files
target_prefix = root + "/docs"
target_files = []
for f in files:
    target_files.append(f.replace( root + "/static", target_prefix))

print(target_files)

# Variables of parsing
def parse_args():
    if len(sys.argv) < 3:
        print ("Not enough arguments")
        exit(1)
    original_prefix = sys.argv[1]
    new_prefix = sys.argv[2]
    # unsafe checkout prefix
    if original_prefix[0] != 'h' or original_prefix[-1] != '/' or new_prefix[0] != 'h' or new_prefix[-1] != '/':
        print ("Seems something wrong on the prefix")
        exit(1)
    return original_prefix, new_prefix

def sgen():
    original_prefix, new_prefix = parse_args()
    # parse the publications_ref into the appropriate html format
    for i in range(len(files)):
        with open(files[i]) as f:
            content = f.read()
        new_content = content.replace(original_prefix, new_prefix)
        with open(target_files[i], "w+") as f:
            f.write(new_content)

sgen()

#!/usr/bin/python
# Generate original static file to another with new prefix
# ./sgen index.html old_prefix static_index.html new_prefix

import sys
from os import walk, path

# File lists
# The two file lists should be aligned.
files = []
for (dirpath, dirname, filenames) in walk("../static"):
    for f in filenames:
        if ".html" in f:
            files.append(dirpath + "/" + f)

# prefix of target files
target_prefix = "../docs"
target_files = []
for f in files:
    target_files.append(f.replace("../static", target_prefix))

print(target_files)

# Variables of parsing
def parse_args():
    if len(sys.argv) < 3:
        print ("Not enough arguments")
        exit(1)
    original_prefix = sys.argv[1]
    new_prefix = sys.argv[2]
    # unsafe checkout prefix
    if original_prefix[0] != 'h' or original_prefix[-1] != '/' or new_prefix[0] != 'h' or new_prefix[-1] != '/':
        print ("Seems something wrong on the prefix")
        exit(1)
    return original_prefix, new_prefix

def sgen():
    original_prefix, new_prefix = parse_args()
    # parse the publications_ref into the appropriate html format
    for i in range(len(files)):
        with open(files[i]) as f:
            content = f.read()
        new_content = content.replace(original_prefix, new_prefix)
        with open(target_files[i], "w+") as f:
            f.write(new_content)

sgen()
Python
0
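The script now reads TRAVIS_BUILD_DIR from the environment and will raise KeyError whenever that variable is unset (i.e. outside CI). A defensive variant, which is not what the commit does, would fall back to the current directory:

from os import environ

# .get() avoids the KeyError when the script runs outside Travis.
root = environ.get('TRAVIS_BUILD_DIR', '.')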
24cebbd351875103067162733cf682320df29cf6
Update VMfileconvert_V2.py
pyecog/light_code/VMfileconvert_V2.py
pyecog/light_code/VMfileconvert_V2.py
import glob, os, numpy, sys

try:
    import stfio
except:
    sys.path.append('C:\Python27\Lib\site-packages')
    import stfio

def main():
    searchpath = os.getcwd()
    exportdirectory = searchpath+'/ConvertedFiles/'
    # Make export directory
    if not os.path.exists(exportdirectory):
        os.makedirs(exportdirectory)
    # Walk through and find abf files
    pattern = '*.abf'
    datafilenames = glob.glob(pattern)
    if datafilenames:
        for filename in datafilenames:
            print ('Converting '+str(filename))
            data = stfio.read(filename,ftype = "abf")
            x = data.aspandas()
            x = x.values
            numpy.save(exportdirectory+filename[0:-4],x)

if __name__ == '__main__':
    main()

import glob, os, numpy

import stfio

def main():
    searchpath = os.getcwd()
    exportdirectory = searchpath+'/ConvertedFiles/'
    # Make export directory
    if not os.path.exists(exportdirectory):
        os.makedirs(exportdirectory)
    # Walk through and find abf files
    pattern = '*.abf'
    datafilenames = glob.glob(pattern)
    if datafilenames:
        for filename in datafilenames:
            print ('Converting '+str(filename))
            data = stfio.read(filename,ftype = "abf")
            x = data.aspandas()
            x = x.values
            numpy.save(exportdirectory+filename[0:-4],x)

if __name__ == '__main__':
    main()
Python
0
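The converter emits one .npy file per ABF recording via numpy.save, which appends the extension automatically. A self-contained round trip of that save/load pattern (file name and data are arbitrary):

import numpy

x = numpy.arange(5.0)
numpy.save('example', x)   # writes example.npy, as the converter does per ABF file
assert numpy.array_equal(numpy.load('example.npy'), x)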

Dataset Card for "commits-pjj-2048"

More Information needed
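Until the card is filled in, the rows above can be inspected directly. A minimal sketch using the Hugging Face datasets library; the column names are inferred from the records above and the split name is an assumption:

from datasets import load_dataset

ds = load_dataset("bigcode/commits-pjj-2048", split="train")
print(ds[0]["subject"], ds[0]["new_file"])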
