Skip to content

Commit d08a688

Browse files
committed
Merge 'origin/develop' into ir_frame_control. Update core/FW/style
2 parents 767ac96 + 6d893a8 commit d08a688

22 files changed

+397
-19
lines changed

CMakeLists.txt

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ endif()
3535

3636
# Pybindings project
3737
set(TARGET_NAME depthai)
38-
project(depthai VERSION "1") # revision of bindings [depthai-core].[rev]
38+
project(depthai VERSION "0") # revision of bindings [depthai-core].[rev]
3939

4040
# Set default build type depending on context
4141
set(default_build_type "Release")
@@ -100,6 +100,7 @@ pybind11_add_module(${TARGET_NAME}
100100
src/pipeline/AssetManagerBindings.cpp
101101
src/openvino/OpenVINOBindings.cpp
102102
src/log/LogBindings.cpp
103+
src/VersionBindings.cpp
103104

104105
src/pipeline/node/NodeBindings.cpp
105106

@@ -124,6 +125,7 @@ pybind11_add_module(${TARGET_NAME}
124125
src/pipeline/node/FeatureTrackerBindings.cpp
125126
src/pipeline/node/AprilTagBindings.cpp
126127
src/pipeline/node/DetectionParserBindings.cpp
128+
src/pipeline/node/WarpBindings.cpp
127129

128130
src/pipeline/datatype/ADatatypeBindings.cpp
129131
src/pipeline/datatype/AprilTagConfigBindings.cpp

docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
Spatial Calculator Multi-ROI
22
============================
33

4-
This example shows how one can use multiple ROIs with a single :ref:`SpatailLocationCalculator` node. A similar logic could be used as a simple depth line
4+
This example shows how one can use multiple ROIs with a single :ref:`SpatialLocationCalculator` node. A similar logic could be used as a simple depth line
55
scanning camera for mobile robots.
66

77
.. rubric:: Similar samples:

docs/source/tutorials/low-latency.rst

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,10 @@ Encoded frames
108108
- 7.5
109109
- `link <https://user-images.githubusercontent.com/18037362/162675335-2e5a9581-972a-448c-b650-6b6d076a04b8.png>`__
110110

111-
You can also reduce frame latency by using `Zero-Copy <https://github.com/luxonis/depthai-python/tree/tmp_zero_copy>`__
111+
You can also reduce frame latency by using `Zero-Copy <https://github.com/luxonis/depthai-python/tree/message_zero_copy>`__
112112
branch of the DepthAI. This will pass pointers (at XLink level) to cv2.Mat instead of doing memcopy (as it currently does),
113113
so performance improvement would depend on the image sizes you are using.
114+
(Note: API differs and not all functionality is available as is on the `message_zero_copy` branch)
114115

115116

116117
Reducing latency when running NN

examples/ColorCamera/rgb_camera_control.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@
3232
For the 'Select control: ...' options, use these keys to modify the value:
3333
'-' or '_' to decrease
3434
'+' or '=' to increase
35+
36+
'/' to toggle showing camera settings: exposure, ISO, lens position, color temperature
3537
"""
3638

3739
import depthai as dai
@@ -117,6 +119,7 @@ def clamp(num, v0, v1):
117119
luma_denoise = 0
118120
chroma_denoise = 0
119121
control = 'none'
122+
show = False
120123

121124
awb_mode = cycle([item for name, item in vars(dai.CameraControl.AutoWhiteBalanceMode).items() if name.isupper()])
122125
anti_banding_mode = cycle([item for name, item in vars(dai.CameraControl.AntiBandingMode).items() if name.isupper()])
@@ -129,6 +132,13 @@ def clamp(num, v0, v1):
129132

130133
ispFrames = ispQueue.tryGetAll()
131134
for ispFrame in ispFrames:
135+
if show:
136+
txt = f"[{ispFrame.getSequenceNum()}] "
137+
txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
138+
txt += f"ISO: {ispFrame.getSensitivity()}, "
139+
txt += f"Lens position: {ispFrame.getLensPosition()}, "
140+
txt += f"Color temp: {ispFrame.getColorTemperature()} K"
141+
print(txt)
132142
cv2.imshow('isp', ispFrame.getCvFrame())
133143

134144
# Send new cfg to camera
@@ -150,6 +160,9 @@ def clamp(num, v0, v1):
150160
key = cv2.waitKey(1)
151161
if key == ord('q'):
152162
break
163+
elif key == ord('/'):
164+
show = not show
165+
if not show: print("Printing camera settings: OFF")
153166
elif key == ord('c'):
154167
ctrl = dai.CameraControl()
155168
ctrl.setCaptureStill(True)

examples/ColorCamera/rgb_preview.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@
2626
print('Connected cameras: ', device.getConnectedCameras())
2727
# Print out usb speed
2828
print('Usb speed: ', device.getUsbSpeed().name)
29+
# Bootloader version
30+
if device.getBootloaderVersion() is not None:
31+
print('Bootloader version: ', device.getBootloaderVersion())
2932

3033
# Output queue will be used to get the rgb frames from the output defined above
3134
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
import cv2
2+
import depthai as dai
3+
import numpy as np
4+
5+
camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P
6+
camSocket = dai.CameraBoardSocket.RGB
7+
ispScale = (1,2)
8+
9+
def getMesh(calibData, ispSize):
    """Compute a subsampled undistortion warp mesh for the RGB camera.

    Builds dense undistort/rectify maps from the device calibration, then
    samples them every ``meshCellSize`` pixels. The device-side warp engine
    interpolates between mesh points, so the full-resolution map is not needed.

    Args:
        calibData: depthai ``CalibrationHandler`` read from the device.
        ispSize: (width, height) of the ISP output frames.

    Returns:
        Tuple ``(mesh, meshWidth, meshHeight)`` where ``mesh`` is a flat list
        of ``(x, y)`` tuples suitable for ``ImageManip.setWarpMesh()``.
    """
    M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
    d1 = np.array(calibData.getDistortionCoefficients(camSocket))
    R1 = np.identity(3)
    mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)

    meshCellSize = 16
    mesh0 = []
    # Creates subsampled mesh which will be loaded on to device to undistort
    # the image. The y range extends one past the last row so the mesh covers
    # the bottom image edge; that extra row reuses the last valid map row.
    for y in range(mapX.shape[0] + 1):  # iterating over height of the image
        if y % meshCellSize == 0:
            rowLeft = []
            for x in range(mapX.shape[1]):  # iterating over width of the image
                if x % meshCellSize == 0:
                    # FIX: the original also special-cased x == mapX.shape[1],
                    # but x never reaches that value (range is exclusive), so
                    # those branches were unreachable; only the row clamp is
                    # kept — behavior is unchanged.
                    if y == mapX.shape[0]:
                        rowLeft.append(mapX[y - 1, x])
                        rowLeft.append(mapY[y - 1, x])
                    else:
                        rowLeft.append(mapX[y, x])
                        rowLeft.append(mapY[y, x])
            # Pad to an even count so every row holds whole (x, y) pairs.
            if (mapX.shape[1] % meshCellSize) % 2 != 0:
                rowLeft.append(0)
                rowLeft.append(0)

            mesh0.append(rowLeft)

    mesh0 = np.array(mesh0)
    meshWidth = mesh0.shape[1] // 2
    meshHeight = mesh0.shape[0]
    mesh0.resize(meshWidth * meshHeight, 2)

    mesh = list(map(tuple, mesh0))

    return mesh, meshWidth, meshHeight
49+
50+
def create_pipeline(calibData):
    """Build a pipeline streaming both the raw and the undistorted ISP output.

    Args:
        calibData: depthai ``CalibrationHandler`` used to derive the warp mesh.

    Returns:
        A ``dai.Pipeline`` with two XLinkOut streams: "Undistorted" and
        "Distorted".
    """
    pipeline = dai.Pipeline()

    # Color camera producing ISP frames at the configured scale/resolution.
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setIspScale(ispScale)
    cam.setBoardSocket(camSocket)
    cam.setResolution(camRes)

    # ImageManip applies the calibration-derived mesh to undistort the image.
    undistort = pipeline.create(dai.node.ImageManip)
    meshData, meshW, meshH = getMesh(calibData, cam.getIspSize())
    undistort.setWarpMesh(meshData, meshW, meshH)
    # NV12 frame: width * height * 3/2 bytes.
    undistort.setMaxOutputFrameSize(cam.getIspWidth() * cam.getIspHeight() * 3 // 2)
    cam.isp.link(undistort.inputImage)

    # Undistorted stream to the host.
    undistortedOut = pipeline.create(dai.node.XLinkOut)
    undistortedOut.setStreamName("Undistorted")
    undistort.out.link(undistortedOut.input)

    # Original (distorted) ISP stream, for side-by-side comparison.
    distortedOut = pipeline.create(dai.node.XLinkOut)
    distortedOut.setStreamName("Distorted")
    cam.isp.link(distortedOut.input)

    return pipeline
73+
74+
with dai.Device() as device:
    # Read calibration off the device and build the pipeline around it.
    calibData = device.readCalibration()
    device.startPipeline(create_pipeline(calibData))

    streamNames = ['Undistorted', 'Distorted']
    queues = [device.getOutputQueue(name, 4, False) for name in streamNames]

    while True:
        # Display the latest frame from each stream.
        for queue in queues:
            cv2.imshow(queue.getName(), queue.get().getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break

examples/ImageManip/image_manip_warp_mesh.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3
1313

1414
# Warp preview frame 1
15-
manip1 = pipeline.create(dai.node.ImageManip)
15+
manip1 = pipeline.create(dai.node.Warp)
1616
# Create a custom warp mesh
1717
tl = dai.Point2f(20, 20)
1818
tr = dai.Point2f(460, 20)

examples/Warp/warp_mesh.py

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
#!/usr/bin/env python3
"""Demonstrate the dai.node.Warp node by warping two preview streams on-device
with custom warp meshes (one 2x3 Point2f mesh, one 3x3 tuple mesh)."""
import cv2
import depthai as dai
import numpy as np

# Create pipeline
pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(496, 496)
camRgb.setInterleaved(False)
# BGR frame: width * height * 3 bytes.
maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3

# Warp preview frame 1
warp1 = pipeline.create(dai.node.Warp)
# Create a custom warp mesh (2 columns x 3 rows of control points)
tl = dai.Point2f(20, 20)
tr = dai.Point2f(460, 20)
ml = dai.Point2f(100, 250)
mr = dai.Point2f(400, 250)
bl = dai.Point2f(20, 460)
br = dai.Point2f(460, 460)
warp1.setWarpMesh([tl, tr, ml, mr, bl, br], 2, 3)
WARP1_OUTPUT_FRAME_SIZE = (992, 500)
warp1.setOutputSize(WARP1_OUTPUT_FRAME_SIZE)
warp1.setMaxOutputFrameSize(WARP1_OUTPUT_FRAME_SIZE[0] * WARP1_OUTPUT_FRAME_SIZE[1] * 3)
warp1.setHwIds([1])
warp1.setInterpolation(dai.node.Warp.Properties.Interpolation.BYPASS)

camRgb.preview.link(warp1.inputImage)
xout1 = pipeline.create(dai.node.XLinkOut)
xout1.setStreamName('out1')
warp1.out.link(xout1.input)

# Warp preview frame 2
warp2 = pipeline.create(dai.node.Warp)
# Create a custom warp mesh (3x3 grid of plain (x, y) tuples)
mesh2 = [
    (20, 20), (250, 100), (460, 20),
    (100, 250), (250, 250), (400, 250),
    (20, 480), (250, 400), (460, 480)
]
warp2.setWarpMesh(mesh2, 3, 3)
warp2.setMaxOutputFrameSize(maxFrameSize)
# BUG FIX: was `warp1.setHwIds([2])` — a copy-paste error that overwrote
# warp1's engine id and left warp2 without an explicit hw id assignment.
warp2.setHwIds([2])
warp2.setInterpolation(dai.node.Warp.Properties.Interpolation.BICUBIC)

camRgb.preview.link(warp2.inputImage)
xout2 = pipeline.create(dai.node.XLinkOut)
xout2.setStreamName('out2')
warp2.out.link(xout2.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the warped frames from the outputs defined above
    q1 = device.getOutputQueue(name="out1", maxSize=8, blocking=False)
    q2 = device.getOutputQueue(name="out2", maxSize=8, blocking=False)

    while True:
        in1 = q1.get()
        if in1 is not None:
            cv2.imshow("Warped preview 1", in1.getCvFrame())
        in2 = q2.get()
        if in2 is not None:
            cv2.imshow("Warped preview 2", in2.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break

examples/bootloader/bootloader_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
print("USB Bootloader - supports only Flash memory")
1515
supportedMemTypes = [dai.DeviceBootloader.Memory.FLASH];
1616
else:
17-
print("NETWORK Bootloader")
17+
print(f"NETWORK Bootloader, is User Bootloader: {bl.isUserBootloader()}")
1818

1919
try:
2020
for mem in supportedMemTypes:

0 commit comments

Comments
 (0)