
Commit c166a2f

Authored by mattrouss (Matthew Rousseau) and Matthew Rousseau
Updates for ZED SDK 4.0.7 (#592)
* Updates for ZED SDK 4.0.7
* Remove build directories
* Add .gitignore

Co-authored-by: Matthew Rousseau <[email protected]>
1 parent: abd4831, commit: c166a2f

74 files changed: 12,407 additions, 786 deletions


.gitignore (+635, large diff not rendered by default)

body tracking/body tracking/cpp/src/main.cpp (+3 -1)

@@ -111,7 +111,9 @@ int main(int argc, char **argv) {
 
     // Configure object detection runtime parameters
     BodyTrackingRuntimeParameters body_tracker_parameters_rt;
-    body_tracker_parameters_rt.detection_confidence_threshold = 40;
+    body_tracker_parameters_rt.detection_confidence_threshold = 60;
+    body_tracker_parameters_rt.skeleton_smoothing = 0.7;
+
     // Create ZED Bodies filled in the main loop
     Bodies bodies;

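For context, a minimal sketch of how these runtime parameters are consumed in the body tracking sample: they are passed to every retrieveBodies() call, so the raised confidence threshold and the new skeleton_smoothing factor take effect per frame without re-enabling the module. The loop below is illustrative and assumes an already opened sl::Camera named `zed` with body tracking enabled.

```cpp
// Illustrative grab loop (assumes an opened sl::Camera `zed` with body tracking enabled)
sl::BodyTrackingRuntimeParameters body_tracker_parameters_rt;
body_tracker_parameters_rt.detection_confidence_threshold = 60; // discard low-confidence detections
body_tracker_parameters_rt.skeleton_smoothing = 0.7;            // [0,1]; higher values smooth keypoints more

sl::Bodies bodies;
while (zed.grab() == sl::ERROR_CODE::SUCCESS) {
    // Runtime parameters are evaluated on each call, so they can be tuned between frames
    zed.retrieveBodies(bodies, body_tracker_parameters_rt);
}
```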
body tracking/export/fbx export/cpp/src/main.cpp (+8)

@@ -298,6 +298,10 @@ void parseArgs(int argc, char** argv, sl::InitParameters& param) {
         param.camera_resolution = sl::RESOLUTION::HD2K;
         std::cout << "[Sample] Using Camera in resolution HD2K" << std::endl;
     }
+    else if (arg.find("HD1200") != std::string::npos) {
+        param.camera_resolution = sl::RESOLUTION::HD1200;
+        std::cout << "[Sample] Using Camera in resolution HD1200" << std::endl;
+    }
     else if (arg.find("HD1080") != std::string::npos) {
         param.camera_resolution = sl::RESOLUTION::HD1080;
         std::cout << "[Sample] Using Camera in resolution HD1080" << std::endl;

@@ -306,6 +310,10 @@ void parseArgs(int argc, char** argv, sl::InitParameters& param) {
         param.camera_resolution = sl::RESOLUTION::HD720;
         std::cout << "[Sample] Using Camera in resolution HD720" << std::endl;
     }
+    else if (arg.find("SVGA") != std::string::npos) {
+        param.camera_resolution = sl::RESOLUTION::SVGA;
+        std::cout << "[Sample] Using Camera in resolution SVGA" << std::endl;
+    }
     else if (arg.find("VGA") != std::string::npos) {
         param.camera_resolution = sl::RESOLUTION::VGA;
         std::cout << "[Sample] Using Camera in resolution VGA" << std::endl;

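The two new branches extend the sample's command-line resolution parsing with HD1200 and SVGA, resolutions introduced alongside SDK 4. The same selection can also be written as a table lookup; the sketch below is an alternative illustration, not the sample's code, and keeps SVGA ahead of VGA because "VGA" is a substring of "SVGA".

```cpp
// Alternative sketch (not the sample's code): resolution keywords resolved via a table.
// SVGA is listed before VGA on purpose, since "VGA" is a substring of "SVGA".
#include <sl/Camera.hpp>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

void setResolutionFromArg(const std::string &arg, sl::InitParameters &param) {
    static const std::vector<std::pair<std::string, sl::RESOLUTION>> table = {
        {"HD2K", sl::RESOLUTION::HD2K},     {"HD1200", sl::RESOLUTION::HD1200},
        {"HD1080", sl::RESOLUTION::HD1080}, {"HD720", sl::RESOLUTION::HD720},
        {"SVGA", sl::RESOLUTION::SVGA},     {"VGA", sl::RESOLUTION::VGA}};
    for (const auto &entry : table) {
        if (arg.find(entry.first) != std::string::npos) {
            param.camera_resolution = entry.second;
            std::cout << "[Sample] Using Camera in resolution " << entry.first << std::endl;
            return;
        }
    }
}
```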
body tracking/multi-camera/cpp/include/ClientPublisher.hpp (+1 -21)

@@ -13,36 +13,16 @@ class ClientPublisher{
     ~ClientPublisher();
 
     bool open(sl::InputType);
-
     void start();
     void stop();
-
-    sl::Mat getViewRef(){
-        return view;
-    }
-
-    sl::Mat getPointCloufRef(){
-        return point_cloud;
-    }
-
-    CUstream getStream() {
-        return zed.getCUDAStream();
-    }
+    void setStartSVOPosition(unsigned pos);
 
     bool isRunning() {
         return running;
     }
 
-    int getSerial() {
-        return serial;
-    }
-
-
 private:
-    sl::Resolution low_resolution;
-    sl::Mat point_cloud, view;
     sl::Camera zed;
-    sl::InitParameters init_parameters;
     void work();
     std::thread runner;
     bool running;

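The header drops the GPU view/point-cloud accessors, the serial getter, and the member InitParameters, and gains setStartSVOPosition() for SVO playback. A hypothetical lifecycle for the slimmed-down publisher might look like the sketch below; the multi-camera sample's main.cpp is not part of this excerpt, so the container name and control flow are assumptions.

```cpp
// Hypothetical driver sketch for the trimmed ClientPublisher API
// (container name and control flow are illustrative, not from this commit)
#include <vector>
#include <sl/Camera.hpp>
#include "ClientPublisher.hpp"

int runClients(const std::vector<sl::InputType> &inputs) {
    std::vector<ClientPublisher> clients(inputs.size());

    for (size_t i = 0; i < inputs.size(); ++i)
        if (!clients[i].open(inputs[i]))   // opens the camera and enables body tracking
            return 1;

    // For SVO inputs, each client could first be seeked to a synced start frame:
    // clients[i].setStartSVOPosition(start_frames[i]);

    for (auto &c : clients) c.start();     // launches the grab / retrieveBodies worker thread

    // ... run the fusion / viewer loop while clients[i].isRunning() ...

    for (auto &c : clients) c.stop();
    return 0;
}
```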
body tracking/multi-camera/cpp/include/GLViewer.hpp (+4 -4)

@@ -162,7 +162,7 @@ class PointCloud {
     void initialize(sl::Mat&, sl::float3 clr);
     // Push a new point cloud
     // Warning: can be called from any thread but the mutex "mutexData" must be locked
-    void pushNewPC(CUstream);
+    void pushNewPC();
     // Draw the point cloud
     // Warning: must be called in the Opengl thread
     void draw(const sl::Transform& vp, bool draw_clr);

@@ -192,7 +192,7 @@ class CameraViewer {
     // Initialize Opengl and Cuda buffers
     bool initialize(sl::Mat& image, sl::float3 clr);
     // Push a new Image + Z buffer and transform into a point cloud
-    void pushNewImage(CUstream);
+    void pushNewImage();
     // Draw the Image
     void draw(sl::Transform vpMatrix);
     // Close (disable update)

@@ -231,7 +231,7 @@ class GLViewer {
     bool isAvailable();
     void init(int argc, char **argv);
 
-    void setupNewCamera(int, sl::Mat, sl::Mat, CUstream );
+    void updateCamera(int, sl::Mat &, sl::Mat &);
 
     void updateBodies(sl::Bodies &objs,std::map<sl::CameraIdentifier, sl::Bodies>& singldata, sl::FusionMetrics& metrics);
 

@@ -301,7 +301,7 @@ class GLViewer {
     std::map<int, PointCloud> point_clouds;
     std::map<int, CameraViewer> viewers;
     std::map<int, sl::Transform> poses;
-    std::map<int, CUstream> streams;
+
     std::map<int, Simple3DObject> skeletons_raw;
     std::map<int, sl::float3> colors;
     std::map<int, sl::float3> colors_sk;
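With setupNewCamera(int, sl::Mat, sl::Mat, CUstream) replaced by updateCamera(int, sl::Mat&, sl::Mat&) and the per-camera CUstream map gone, the viewer now takes sl::Mat references instead of GPU buffers tied to streams. A plausible caller, sketched under the assumption of a single camera with id 0 and an opened sl::Camera named `zed` (the sample's render loop is not shown in this diff):

```cpp
// Assumed caller pattern for the new updateCamera() signature (illustrative only)
sl::Mat view, point_cloud;
while (viewer.isAvailable()) {
    if (zed.grab() == sl::ERROR_CODE::SUCCESS) {
        zed.retrieveImage(view, sl::VIEW::LEFT, sl::MEM::CPU);                 // CPU-side image
        zed.retrieveMeasure(point_cloud, sl::MEASURE::XYZBGRA, sl::MEM::CPU);  // CPU-side point cloud
        viewer.updateCamera(0, view, point_cloud);  // passed by reference, no CUstream required
    }
}
```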
body tracking/multi-camera/cpp: new file (+56)

@@ -0,0 +1,56 @@
+#pragma once
+
+#include <sl/Camera.hpp>
+
+/**
+ * @brief Compute the start frame of each SVO for playback to be synced
+ *
+ * @param svo_files Map camera index to SVO file path
+ * @return Map camera index to starting SVO frame for synced playback
+ */
+std::map<int, int> syncDATA(std::map<int, std::string> svo_files) {
+    std::map<int, int> output; // map of camera index and frame index of the starting point for each
+
+    // Open all SVO
+    std::map<int, std::shared_ptr<sl::Camera>> p_zeds;
+
+    for (auto &it : svo_files) {
+        auto p_zed = std::make_shared<sl::Camera>();
+
+        sl::InitParameters init_param;
+        init_param.depth_mode = sl::DEPTH_MODE::NONE;
+        init_param.camera_disable_self_calib = true;
+        init_param.input.setFromSVOFile(it.second.c_str());
+
+        auto error = p_zed->open(init_param);
+        if (error == sl::ERROR_CODE::SUCCESS)
+            p_zeds.insert(std::make_pair(it.first, p_zed));
+        else {
+            std::cerr << "Could not open file " << it.second.c_str() << ": " << sl::toString(error) << ". Skipping" << std::endl;
+        }
+    }
+
+    // Compute the starting point, we have to take the latest one
+    sl::Timestamp start_ts = 0;
+    for (auto &it : p_zeds) {
+        it.second->grab();
+        auto ts = it.second->getTimestamp(sl::TIME_REFERENCE::IMAGE);
+
+        if (ts > start_ts)
+            start_ts = ts;
+    }
+
+    std::cout << "Found SVOs common starting time: " << start_ts << std::endl;
+
+    // The starting point is now known, let's find the frame idx for all corresponding
+    for (auto &it : p_zeds) {
+        auto frame_position_at_ts = it.second->getSVOPositionAtTimestamp(start_ts);
+
+        if (frame_position_at_ts != -1)
+            output.insert(std::make_pair(it.first, frame_position_at_ts));
+    }
+
+    for (auto &it : p_zeds) it.second->close();
+
+    return output;
+}

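A possible way to combine the new syncDATA() helper with ClientPublisher::setStartSVOPosition() so that every recording starts playback at the latest common timestamp; the file names and the `clients` map are placeholders, since the sample's wiring code is not included in this excerpt.

```cpp
// Hypothetical usage of syncDATA() for synced SVO playback (paths are placeholders)
std::map<int, std::string> svo_files = {
    {0, "cam_0.svo"},
    {1, "cam_1.svo"}
};

// Frame index at which each SVO reaches the latest common image timestamp
std::map<int, int> start_frames = syncDATA(svo_files);

for (auto &it : start_frames) {
    // clients[it.first] is assumed to be an already-opened ClientPublisher
    clients[it.first].setStartSVOPosition(it.second);
}
```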
body tracking/multi-camera/cpp/src/ClientPublisher.cpp (+15 -23)

@@ -2,7 +2,6 @@
 
 ClientPublisher::ClientPublisher() : running(false)
 {
-    init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA;
 }
 
 ClientPublisher::~ClientPublisher()

@@ -15,7 +14,11 @@ bool ClientPublisher::open(sl::InputType input) {
     if (runner.joinable())
         return false;
 
+    sl::InitParameters init_parameters;
+    init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA;
     init_parameters.input = input;
+    if (input.getType() == sl::InputType::INPUT_TYPE::SVO_FILE)
+        init_parameters.svo_real_time_mode = true;
     init_parameters.coordinate_units = sl::UNIT::METER;
     init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP;
     auto state = zed.open(init_parameters);

@@ -39,7 +42,7 @@ bool ClientPublisher::open(sl::InputType input) {
     // define the body tracking parameters, as the fusion can does the tracking and fitting you don't need to enable them here, unless you need it for your app
     sl::BodyTrackingParameters body_tracking_parameters;
     body_tracking_parameters.detection_model = sl::BODY_TRACKING_MODEL::HUMAN_BODY_MEDIUM;
-    body_tracking_parameters.body_format = sl::BODY_FORMAT::BODY_38;
+    body_tracking_parameters.body_format = sl::BODY_FORMAT::BODY_18;
     body_tracking_parameters.enable_body_fitting = false;
     body_tracking_parameters.enable_tracking = false;
     state = zed.enableBodyTracking(body_tracking_parameters);

@@ -49,20 +52,6 @@ bool ClientPublisher::open(sl::InputType input) {
         return false;
     }
 
-    auto camera_infos = zed.getCameraInformation();
-
-    serial = camera_infos.serial_number;
-
-    auto resolution = camera_infos.camera_configuration.resolution;
-
-    // Define display resolution and check that it fit at least the image resolution
-    float image_aspect_ratio = resolution.width / (1.f * resolution.height);
-    int requested_low_res_w = std::min(640, (int)resolution.width);
-    low_resolution = sl::Resolution(requested_low_res_w, requested_low_res_w / image_aspect_ratio);
-
-    view.alloc(low_resolution, sl::MAT_TYPE::U8_C4, sl::MEM::GPU);
-    point_cloud.alloc(low_resolution, sl::MAT_TYPE::F32_C4, sl::MEM::GPU);
-
     return true;
 }

@@ -91,22 +80,25 @@ void ClientPublisher::work()
     sl::BodyTrackingRuntimeParameters body_runtime_parameters;
     body_runtime_parameters.detection_confidence_threshold = 40;
 
-    sl::RuntimeParameters runtime_parameters;
-    runtime_parameters.confidence_threshold = 30;
 
     // in this sample we use a dummy thread to process the ZED data.
     // you can replace it by your own application and use the ZED like you use to, retrieve its images, depth, sensors data and so on.
-    // as long as you call the grab function and the retrieveBodies (wich run the detection) the camera will be able to seamlessly transmit the data to the fusion module.
+    // as long as you call the grab function and the retrieveBodies (which runs the detection) the camera will be able to seamlessly transmit the data to the fusion module.
     while (running) {
-        if (zed.grab(runtime_parameters) == sl::ERROR_CODE::SUCCESS) {
+        if (zed.grab() == sl::ERROR_CODE::SUCCESS) {
             /*
             Your App
-            */
+
+            */
 
             // just be sure to run the bodies detection
             zed.retrieveBodies(bodies, body_runtime_parameters);
         }
     }
 }
+
+void ClientPublisher::setStartSVOPosition(unsigned pos) {
+    zed.setSVOPosition(pos);
+    zed.grab();
+}

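The InitParameters are now built locally in open(), and svo_real_time_mode is switched on whenever the input is an SVO file, so playback is paced like a live camera for the fusion module. A short sketch of the two input kinds open() distinguishes; the path and camera ID are placeholders.

```cpp
// Illustrative construction of the two input kinds handled by ClientPublisher::open()
sl::InputType live_input;
live_input.setFromCameraID(0);          // live camera, opened without real-time SVO pacing

sl::InputType svo_input;
svo_input.setFromSVOFile("cam_0.svo");  // SVO recording, replayed with svo_real_time_mode = true
```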
body tracking/multi-camera/cpp/src/GLViewer.cpp (+14 -11)

@@ -221,15 +221,19 @@ sl::float3 newColor(float hh) {
     return clr;
 }
 
-void GLViewer::setupNewCamera(int id, sl::Mat view, sl::Mat pc, CUstream strm){
+void GLViewer::updateCamera(int id, sl::Mat &view, sl::Mat &pc){
     mtx.lock();
     if (colors.find(id) == colors.end()) {
         float hh = uint_dist360(rng) / 60.f;
         colors[id] = newColor(hh);
     }
-    viewers[id].initialize(view, colors[id]);
-    point_clouds[id].initialize(pc, colors[id]);
-    streams[id] = strm;
+
+    if(view.isInit() && viewers.find(id) == viewers.end())
+        viewers[id].initialize(view, colors[id]);
+
+    if(pc.isInit() && point_clouds.find(id) == point_clouds.end())
+        point_clouds[id].initialize(pc, colors[id]);
+
     mtx.unlock();
 }

@@ -281,7 +285,6 @@ void GLViewer::render() {
 void GLViewer::setCameraPose(int id, sl::Transform pose) {
     mtx.lock();
     poses[id] = pose;
-
     if (colors.find(id) == colors.end()) {
         float hh = uint_dist360(rng) / 60.f;
         colors[id] = newColor(hh);

@@ -436,10 +439,10 @@ void GLViewer::update() {
         it.second.pushToGPU();
 
     for(auto &it: point_clouds)
-        it.second.pushNewPC(streams[it.first]);
+        it.second.pushNewPC();
 
     for(auto &it: viewers)
-        it.second.pushNewImage(streams[it.first]);
+        it.second.pushNewImage();
 
     mtx.unlock();
     clearInputs();

@@ -792,9 +795,9 @@ void PointCloud::initialize(sl::Mat &ref, sl::float3 clr_) {
     shDrawColor = glGetUniformLocation(shader_.getProgramId(), "u_drawFlat");
 }
 
-void PointCloud::pushNewPC(CUstream strm) {
+void PointCloud::pushNewPC() {
     if (refMat.isInit())
-        cudaMemcpyAsync(xyzrgbaMappedBuf_, refMat.getPtr<sl::float4>(sl::MEM::GPU), numBytes_, cudaMemcpyDeviceToDevice, strm);
+        cudaMemcpy(xyzrgbaMappedBuf_, refMat.getPtr<sl::float4>(sl::MEM::CPU), numBytes_, cudaMemcpyHostToDevice);
 }
 
 void PointCloud::draw(const sl::Transform& vp, bool draw_flat) {

@@ -949,9 +952,9 @@ bool CameraViewer::initialize(sl::Mat &im, sl::float3 clr) {
     return (err == cudaSuccess);
 }
 
-void CameraViewer::pushNewImage(CUstream strm) {
+void CameraViewer::pushNewImage() {
     if (!ref.isInit()) return;
-    auto err = cudaMemcpy2DToArrayAsync(ArrIm, 0, 0, ref.getPtr<sl::uchar1>(sl::MEM::GPU), ref.getStepBytes(sl::MEM::GPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyDeviceToDevice, strm);
+    auto err = cudaMemcpy2DToArray(ArrIm, 0, 0, ref.getPtr<sl::uchar1>(sl::MEM::CPU), ref.getStepBytes(sl::MEM::CPU), ref.getPixelBytes() * ref.getWidth(), ref.getHeight(), cudaMemcpyHostToDevice);
     if (err) std::cout << "err 2 " << err << " " << cudaGetErrorString(err) << "\n";
 }

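The viewer no longer consumes GPU-resident sl::Mat buffers through per-camera CUDA streams; the data is now expected in CPU memory and uploaded with blocking copies (cudaMemcpy / cudaMemcpy2DToArray). The sketch below isolates that copy pattern; the function and buffer names are assumptions, only the sl::Mat accessors and the copy direction mirror the diff.

```cpp
// Copy pattern used after this change: host-resident sl::Mat -> pre-allocated CUDA buffer.
// Assumes `device_buffer` was allocated with cudaMalloc for at least `num_bytes`.
#include <cuda_runtime.h>
#include <iostream>
#include <sl/Camera.hpp>

void uploadPointCloud(sl::Mat &mat, void *device_buffer, size_t num_bytes) {
    if (!mat.isInit()) return;
    // Blocking host-to-device copy; the cudaMemcpyAsync + CUstream variant was removed in this commit
    cudaError_t err = cudaMemcpy(device_buffer,
                                 mat.getPtr<sl::float4>(sl::MEM::CPU),
                                 num_bytes, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        std::cout << "upload failed: " << cudaGetErrorString(err) << std::endl;
}
```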