Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,12 @@ def supports_partitioning_result(
node, partition_list, filter_fn=is_not_qdq_node
)
if is_alone_in_partition:
return activation_supported_on_target(node, neutron_target_spec)
neutron_c = getattr(
custom_delegation_options, "use_new_flow_neutron_c", False
)
return activation_supported_on_target(
node, neutron_target_spec, use_new_flow_neutron_c=neutron_c
)

return True

Expand Down
7 changes: 7 additions & 0 deletions backends/nxp/backend/neutron_converter_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,13 @@ def convert(
if hasattr(cctx.compilationOpts, "useNewFlowNeutronC"):
cctx.compilationOpts.useNewFlowNeutronC = use_new_flow_neutron_c

# Neutron-C debugging outputs
cctx.compilationOpts.verbose = True
cctx.compilationOpts.keepGraphs = True
cctx.compilationOpts.dumpAfterOptimize = "optimized_model.tflite"
cctx.compilationOpts.dumpAfterExtract = "extracted_model.tflite"
cctx.compilationOpts.dumpMicrocode = True

# Try to use multiprocessing for isolation, but fall back to direct execution
# if the environment doesn't support it (e.g., in sandcastle/build environments)
try:
Expand Down
6 changes: 5 additions & 1 deletion backends/nxp/backend/neutron_operator_support.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# Copyright 2025-2026 NXP
#
# This source code is licensed under the BSD-style license found in the
Expand Down Expand Up @@ -86,13 +86,17 @@


def activation_supported_on_target(
node: Node, neutron_target_spec: NeutronTargetSpec
node: Node, neutron_target_spec: NeutronTargetSpec, use_new_flow_neutron_c: bool = False,
) -> bool:
"""This function determines if the current NeutronSoftware properly supports an activation operator represented by the given node.

:param node: The node representing the activation operator.
:param neutron_target_spec: Object for querying the target platform to retrieve its properties.
"""

if use_new_flow_neutron_c:
return True

input_shape = list(input_tensor(node, 0).shape)
if node.args[0].meta[NXP_NODE_FORMAT].is_channels_first():
input_shape = dims_to_channels_last(input_shape)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
import numpy as np
import pytest
import torch

from executorch.backends.nxp.backend.edge_program_converter import (
EdgeProgramToIRConverter,
exir_ops,
Expand All @@ -21,7 +20,9 @@
ToNCHWPreprocess,
ToNHWCPreprocess,
)
from executorch.backends.nxp.tests.graph_verifier import BaseGraphVerifier
from executorch.backends.nxp.tests.models import Conv2dModule, LinearModule, ReLUModule
from executorch.backends.nxp.tests.nsys_testing import lower_run_compare
from torch.export import ExportedProgram
from executorch.backends.nxp.tests.use_qat import * # noqa F403

Expand Down Expand Up @@ -146,3 +147,21 @@ def test_relu_conversion__unsupported(mocker, input_shape):
# Make sure the `relu` was NOT delegated.
assert not graph_contains_any_of_ops(delegated_ep.graph, [ExecutorchDelegateCall])
assert graph_contains_any_of_ops(delegated_ep.graph, [ReLU])


@pytest.mark.parametrize(
    "input_shape",
    [
        pytest.param(
            (3, 9, 7), id="num_channels not divisible by NUM_MACS, alone in partition"
        ),
    ],
)
def test_relu_conversion__new_flow_support(mocker, input_shape):
    """Verify that a lone ReLU, normally rejected (num_channels not divisible by
    NUM_MACS), IS delegated when the new Neutron-C flow is enabled via
    ``use_new_flow_neutron_c=True``.
    """
    model = ReLUModule()
    graph_verifier = BaseGraphVerifier(
        exp_num_delegate_call_nodes=1,  # The ReLU is expected to be delegated under the new flow.
        exp_non_delegated_nodes=[],
    )

    lower_run_compare(model, input_shape, graph_verifier, use_new_flow_neutron_c=True)
Loading