From 977b79bc87ec2fb29b46ddbe361200ff59616026 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 20 Jun 2018 01:39:56 -0700
Subject: [PATCH 1/4] "add doc for exec"

---
 python/paddle/fluid/executor.py | 68 +++++++++++++++++++++++++++------
 1 file changed, 56 insertions(+), 12 deletions(-)

diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 159b0ca39eed5..193595678fcaf 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -205,6 +205,25 @@ def to_name_str(var):
 
 
 class Executor(object):
+    """
+    An Executor in Python. It only supports single-GPU execution; for
+    multi-card execution, please refer to ParallelExecutor.
+
+    The Python executor takes a program and adds feed and fetch operators to
+    it according to the feed map and fetch_list. The feed map provides input
+    data for the program, and fetch_list lists the variables (or their names)
+    that the user wants to fetch after the program runs. Note that the
+    executor runs all operators in the program, not only the operators that
+    the fetch_list depends on.
+
+    The executor stores global variables in the global scope and creates a
+    local scope for temporary variables. The contents of the local scope are
+    discarded after each minibatch forward/backward pass finishes, while the
+    variables in the global scope persist across runs. All operators in the
+    program are run in sequence.
+
+    Args:
+        place(core.CPUPlace|core.CUDAPlace(n)): the device on which this executor runs.
+
+    Note: when debugging a complicated network on multiple GPUs, you can first
+    test it on this executor. Executor and ParallelExecutor take exactly the
+    same arguments and are expected to produce the same results.
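+
+    Examples:
+        A minimal usage sketch (it assumes `paddle.fluid` is importable and
+        that a program has already been built; see `Executor.run` below for a
+        complete end-to-end example):
+
+        >>> import paddle.fluid as fluid
+        >>> place = fluid.CPUPlace()  # or fluid.CUDAPlace(0) for a single GPU
+        >>> exe = fluid.Executor(place)
+        >>> exe.run(fluid.default_startup_program())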
+    """
+
     def __init__(self, place):
         self.place = place
         p = core.Place()
@@ -304,23 +323,48 @@ def run(self,
               scope=None,
               return_numpy=True,
               use_program_cache=False):
-        """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
-
-        Python executor takes a program, add feed operators and fetch operators to this program according
-        to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-        the variables(or names) that user want to get after program run. Note: the executor will run all
-        operators in the program but not only the operators dependent by the fetch_list
+        """
+        Run a program with this Executor. Feed data with the feed map and
+        fetch the results listed in fetch_list.
+
+        The executor adds feed and fetch operators to the program according to
+        the feed map and fetch_list. The feed map provides input data for the
+        program, and fetch_list lists the variables (or their names) that the
+        user wants to fetch after the program runs.
+
+        Note: the executor runs all operators in the program, not only the
+        operators that the fetch_list depends on.
 
-        :param program: the program that need to run, if not provied, then default_main_program will be used.
-        :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData}
-        :param fetch_list: a list of variable or variable names that user want to get, run will return them according
-               to this list.
-        :param feed_var_name: the name for the input variable of feed Operator.
-        :param fetch_var_name: the name for the output variable of feed Operator.
-        :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope
-        :param return_numpy: if convert the fetched tensor to numpy
-        :param use_program_cache: set use_program_cache to true if program not changed compare to the last step.
-        :return: result according to fetch_list.
+        Args:
+            program(Program): the program to run; if not provided, default_main_program will be used.
+            feed(dict): the feed variable map, e.g. {"image": ImageData, "label": LabelData}
+            fetch_list(list): a list of variables or variable names to fetch; run will return them according
+                to this list.
+            feed_var_name(str): the name of the input variable of the feed operator.
+            fetch_var_name(str): the name of the output variable of the fetch operator.
+            scope(Scope): the scope used to run this program; it can be switched to a different scope. Default is global_scope.
+            return_numpy(bool): whether to convert the fetched tensors to numpy arrays.
+            use_program_cache(bool): set to True if the program has not changed since the last step.
+
+        Returns:
+            list(numpy.array): the fetch results, ordered according to fetch_list.
+
+        Examples:
+            .. code-block:: python
+                data = layers.data(name='X', shape=[1], dtype='float32')
+                hidden = layers.fc(input=data, size=10)
+                out = layers.create_tensor(dtype='float32')
+                layers.assign(hidden, out)
+                loss = layers.mean(out)
+                adam = fluid.optimizer.Adam()
+                adam.minimize(loss)
+
+                cpu = core.CPUPlace()
+                exe = Executor(cpu)
+                exe.run(default_startup_program())
+
+                x = numpy.random.random(size=(10, 1)).astype('float32')
+                outs = exe.run(
+                    feed={'X': x},
+                    fetch_list=[loss.name])
         """
         if feed is None:
             feed = {}

From 817ceab17a93d4369cca6976d5fdeb4e572c560f Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 20 Jun 2018 02:04:44 -0700
Subject: [PATCH 2/4] "add more changes"

---
 python/paddle/fluid/executor.py | 44 ++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 6 deletions(-)

diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 193595678fcaf..955bd6b35ff0f 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -18,7 +18,7 @@
 from . import core
 
 __all__ = [
-    'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var'
+    'Executor', 'global_scope', 'scope_guard', '_switch_scope', 'fetch_var'
 ]
 
 g_scope = core.Scope()
@@ -35,7 +35,7 @@ def global_scope():
     return g_scope
 
 
-def switch_scope(scope):
+def _switch_scope(scope):
     global g_scope
     ex = g_scope
     g_scope = scope
@@ -57,12 +57,27 @@ def scope_guard(scope):
 
     Args:
        scope: The new global/default scope.
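+
+    Examples:
+        A brief sketch of switching the default scope (it assumes
+        `paddle.fluid` is importable; the work done inside the block is
+        elided):
+
+        >>> import paddle.fluid as fluid
+        >>> new_scope = fluid.core.Scope()
+        >>> with fluid.scope_guard(new_scope):
+        >>>     # variables created here live in new_scope, not the
+        >>>     # previous global scope
+        >>>     ...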
    """
-    ex = switch_scope(scope)
+    ex = _switch_scope(scope)
     yield
-    switch_scope(ex)
+    _switch_scope(ex)
 
 
 def as_numpy(tensor):
+    """
+    Convert a Tensor to a numpy.ndarray. This only supports Tensors without
+    LoD information; for higher-dimensional sequence data, please use
+    LoDTensor directly.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> outs = executor.run(...)
+        >>> np_outs = map(lambda x: as_numpy(x), outs)
+        >>> ...
+
+    Args:
+        tensor(Variable): an instance of Tensor
+
+    Returns:
+        numpy.ndarray
+    """
     if isinstance(tensor, list):
         return [as_numpy(t) for t in tensor]
     assert isinstance(tensor, core.LoDTensor)
@@ -186,7 +201,7 @@ def fetch_var(name, scope=None, return_numpy=True):
     return tensor
 
 
-def get_program_cache_key(feed, fetch_list):
+def _get_program_cache_key(feed, fetch_list):
     feed_var_names = feed.keys()
 
     def to_name_str(var):
@@ -232,6 +247,23 @@ def __init__(self, place):
         self.program_caches = dict()
 
     def as_lodtensor(self, data):
+        """
+        Convert a numpy.ndarray to a Tensor. This only supports Tensors
+        without LoD information; for higher-dimensional sequence data, please
+        use LoDTensor directly.
+
+        Examples:
+            >>> import paddle.fluid as fluid
+            >>> exe = fluid.Executor(fluid.CPUPlace())
+            >>> data = np.random.random(size=(100, 200, 300))
+            >>> np_outs = map(lambda x: exec.as_lodtensor(x), data)
+            >>> ...
+
+        Args:
+            data(numpy.ndarray): an instance of numpy.ndarray
+
+        Returns:
+            LoDTensor
+        """
         if isinstance(data, list):
             raise RuntimeError("Some of your feed data hold LoD information. \
                 They can not be completely cast from a list of Python \
@@ -385,7 +417,7 @@ def run(self,
 
         if scope is None:
             scope = global_scope()
-        cache_key = get_program_cache_key(feed, fetch_list)
+        cache_key = _get_program_cache_key(feed, fetch_list)
         if use_program_cache:
             cached_program = self._get_program_cache(cache_key)
             if cached_program is None:

From e5f8ec3e10822e345193ef47b32177dc076035f6 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 20 Jun 2018 02:27:58 -0700
Subject: [PATCH 3/4] "fix based on preview"

---
 python/paddle/fluid/executor.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 955bd6b35ff0f..39c4f90a5f94a 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -255,7 +255,7 @@ def as_lodtensor(self, data):
         >>> import paddle.fluid as fluid
         >>> exe = fluid.Executor(fluid.CPUPlace())
         >>> data = np.random.random(size=(100, 200, 300))
-        >>> np_outs = map(lambda x: exec.as_lodtensor(x), data)
+        >>> np_outs = map(lambda x: exe.as_lodtensor(x), data)
         >>> ...
 
         Args:
@@ -367,8 +367,7 @@ def run(self,
         Args:
             program(Program): the program to run; if not provided, default_main_program will be used.
             feed(dict): the feed variable map, e.g. {"image": ImageData, "label": LabelData}
-            fetch_list(list): a list of variables or variable names to fetch; run will return them according
-                to this list.
+            fetch_list(list): a list of variables or variable names to fetch; run will return them according to this list.
             feed_var_name(str): the name of the input variable of the feed operator.
             fetch_var_name(str): the name of the output variable of the fetch operator.
             scope(Scope): the scope used to run this program; it can be switched to a different scope. Default is global_scope.
@@ -382,6 +381,7 @@ def run(self,
 
         Examples:
             .. code-block:: python
+
                 data = layers.data(name='X', shape=[1], dtype='float32')
                 hidden = layers.fc(input=data, size=10)
                 out = layers.create_tensor(dtype='float32')
                 layers.assign(hidden, out)

From a07bbd7aa04d77a6f7d6855e4c90ddb20c7202f8 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 20 Jun 2018 02:32:45 -0700
Subject: [PATCH 4/4] "change code format"

---
 python/paddle/fluid/executor.py | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 39c4f90a5f94a..dc275674618ee 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -380,23 +380,22 @@ def run(self,
 
         Examples:
-            .. code-block:: python
-
-                data = layers.data(name='X', shape=[1], dtype='float32')
-                hidden = layers.fc(input=data, size=10)
-                out = layers.create_tensor(dtype='float32')
-                layers.assign(hidden, out)
-                loss = layers.mean(out)
-                adam = fluid.optimizer.Adam()
-                adam.minimize(loss)
-
-                cpu = core.CPUPlace()
-                exe = Executor(cpu)
-                exe.run(default_startup_program())
-
-                x = numpy.random.random(size=(10, 1)).astype('float32')
-                outs = exe.run(
-                    feed={'X': x},
-                    fetch_list=[loss.name])
+
+            >>> data = layers.data(name='X', shape=[1], dtype='float32')
+            >>> hidden = layers.fc(input=data, size=10)
+            >>> out = layers.create_tensor(dtype='float32')
+            >>> layers.assign(hidden, out)
+            >>> loss = layers.mean(out)
+            >>> adam = fluid.optimizer.Adam()
+            >>> adam.minimize(loss)
+
+            >>> cpu = core.CPUPlace()
+            >>> exe = Executor(cpu)
+            >>> exe.run(default_startup_program())
+
+            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
+            >>> outs = exe.run(
+            >>>     feed={'X': x},
+            >>>     fetch_list=[loss.name])
         """
         if feed is None:
             feed = {}