mxprepost provides a unified, high-performance interface for YOLO pre-processing and post-processing, designed for seamless integration with MemryX MXA applications across YOLOv8–YOLOv11 tasks such as detection, segmentation, and pose estimation.
It handles tasks like:
- Pre-processing: Frame preparation (e.g., resizing, normalization).
- Post-processing: Model output decoding, Non-Maximum Suppression (NMS), class filtering, and result annotation.
It is intended to be used inside the MXA input/output callbacks (via mxapi.MxAccl). Under the hood, optimized C++ bindings accelerate compute-intensive operations such as NMS and output decoding.
Typical workflow:

1. Create an `mxapi.MxAccl` instance and connect the input/output callbacks.
2. Initialize `mxprepost.MxPrepost` with the accelerator.
3. Call:
   - `preprocess(frame)` in the input callback,
   - `postprocess(mxa_output, ori_height, ori_width)` or `postprocess(mxa_output, ori_frame)` in the output callback,
   - `draw(frame, result)` for visualization.
import cv2
import mxprepost
from memryx import mxapi
class App:
def __init__(self, dfp_path, video_path):
# Open video source
self.cap = cv2.VideoCapture(video_path)
# Store original frame dimensions
self.ori_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.ori_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Create MXA accelerator
self.accl = mxapi.MxAccl(dfp_path, [0], [False, False], False)
#---------------------------------------------------------------
#-------------------------- MxPrepost --------------------------
#---------------------------------------------------------------
# Initialize MxPrepost
self.pp = mxprepost.MxPrepost(
accl=self.accl,
task="yolov8-det"
)
# Connect callbacks
self.accl.connect_stream(self.in_callback, self.out_callback, stream_id=0)
def in_callback(self, stream_id):
ret, frame = self.cap.read()
if not ret:
return None
#---------------------------------------------------------------
#-------------------------- MxPrepost --------------------------
#---------------------------------------------------------------
return self.pp.preprocess(frame)
def out_callback(self, mxa_output, stream_id):
#---------------------------------------------------------------
#-------------------------- MxPrepost --------------------------
#---------------------------------------------------------------
# Postprocess using original dimensions
result = self.pp.postprocess(
mxa_output,
self.ori_height,
self.ori_width
)
# Access outputs (depending on task)
# ---------------------------
# Detection (Bounding Boxes)
# ---------------------------
if result.boxes is not None:
for box in result.boxes:
print("xyxy:", box.xyxy) # [x1, y1, x2, y2]
print("xywh:", box.xywh) # [x_center, y_center, w, h]
print("conf:", box.conf) # confidence score
print("cls_id:", box.cls_id) # class index
print("cls_name:", box.cls_name)# class label string
print("----")
# ---------------------------
# Segmentation (Masks)
# ---------------------------
if result.masks is not None:
for mask in result.masks:
# mask is a polygon (list of Point2f), not a binary HxW array
pts = [(p.x, p.y) for p in mask.xys]
print("mask cls_id:", int(mask.cls_id))
print("num polygon points:", len(pts))
if pts:
print("first 5 points:", pts[:5])
print("----")
# ---------------------------
# Pose Estimation (Keypoints)
# ---------------------------
if result.keypoints is not None:
for det_id, kps in enumerate(result.keypoints):
print(f"detection {det_id} keypoints:", len(kps))
# each kp has kp.xy (Point2f) and kp.conf
for kp_id, kp in enumerate(kps):
print(f" kp[{kp_id}] = (x={kp.xy.x:.2f}, y={kp.xy.y:.2f}, conf={kp.conf:.3f})")
print("----")
# Optional: draw detections
# annotated = self.pp.draw(frame, result)prepost = mxprepost.MxPrepost(
accl=accl, # [required] mxapi.MxAccl instance
task="yolov8-det", # [required] Expected format: yolov<n>-[det|seg|pose]
conf=0.3, # [optional] Default 0.3
iou=0.4, # [optional] Default 0.4
classmap_path="/path/to/classmap.txt" # [optional] Path to a .txt file containing custom class names (one per line). Defaults to COCO dataset.
valid_classes=[0], # [optional] List of class IDs to return. All other detections will be ignored (e.g., [0] for person only in COCO dataset).
model_id=0 # [optional] model_id in the .dfp file. Default 0
# class_agnostic=True, # [optional] Default is False, set to True if your model is class-agnostic
)-
- `task` examples: `yolov8-det`, `yolov11-det`, `yolov11-pose`
- `model_id` is required only if multiple models are compiled into the same DFP.
git clone --recurse-submodules git@github.com:memryx/MxPrepost.git
cd MxPrepost

# activate your virtualenv with MemryX SDK 2.2
source ~/.mx/bin/activate

cd pymodule
mkdir build && cd build
cmake ..
make -j

cd MxPrepost/samples/python
# link the mxprepost module into the same directory as your target Python code
ln -sfv ../../pymodule/build/mxprepost.cpython-*.so .
# Alternatively, copy the .so file to the same path as your Python code
# if working in another directory.
- Prefer `postprocess(ofmaps, result, ori_h, ori_w)` when you already know the original frame size.
- Use `postprocess(ofmaps, result, original_frame)` if you have the original `cv::Mat` available in the output callback.
Legacy accelerator APIs (`SyncAccl`, `AsyncAccl`, `MultiStreamAsyncAccl`) are not supported.
Only `mxapi.MxAccl` is supported.
- ✅ YOLOv7 (det only)
- ✅ YOLOv8 (det / pose)
- ✅ YOLOv9 (det only)
- ✅ YOLOv10 (det only)
- ✅ YOLOv11 (det / pose)
- ✅ Custom detection datasets