1 // **************************************************************************
2 // This file is property of and copyright by the ALICE HLT Project *
3 // ALICE Experiment at CERN, All rights reserved. *
5 // Primary Authors: Sergey Gorbunov <sergey.gorbunov@kip.uni-heidelberg.de> *
6 // Ivan Kisel <kisel@kip.uni-heidelberg.de> *
7 // David Rohr <drohr@kip.uni-heidelberg.de> *
8 // for The ALICE HLT Project. *
10 // Permission to use, copy, modify and distribute this software and its *
11 // documentation strictly for non-commercial purposes is hereby granted *
12 // without fee, provided that the above copyright notice appears in all *
13 // copies and that both the copyright notice and this permission notice *
14 // appear in the supporting documentation. The authors make no claims *
15 // about the suitability of this software for any purpose. It is *
16 // provided "as is" without express or implied warranty. *
18 //***************************************************************************
20 #include "AliHLTTPCCAGPUTracker.h"
25 #include <sys/syscall.h>
27 #include "AliHLTTPCCADef.h"
28 #include "AliHLTTPCCAGPUConfig.h"
30 #include <sm_11_atomic_functions.h>
31 #include <sm_12_atomic_functions.h>
35 //Disable assertions since they produce errors in GPU Code
41 __constant__ float4 gAliHLTTPCCATracker[HLTCA_GPU_TRACKER_CONSTANT_MEM / sizeof( float4 )];
42 #ifdef HLTCA_GPU_TEXTURE_FETCH
43 texture<ushort2, 1, cudaReadModeElementType> gAliTexRefu2;
44 texture<unsigned short, 1, cudaReadModeElementType> gAliTexRefu;
45 texture<signed short, 1, cudaReadModeElementType> gAliTexRefs;
48 //Include CXX Files, GPUd() macro will then produce CUDA device code out of the tracker source code
49 #include "AliHLTTPCCATrackParam.cxx"
50 #include "AliHLTTPCCATrack.cxx"
52 #include "AliHLTTPCCAHitArea.cxx"
53 #include "AliHLTTPCCAGrid.cxx"
54 #include "AliHLTTPCCARow.cxx"
55 #include "AliHLTTPCCAParam.cxx"
56 #include "AliHLTTPCCATracker.cxx"
58 #include "AliHLTTPCCAProcess.h"
60 #include "AliHLTTPCCATrackletSelector.cxx"
61 #include "AliHLTTPCCANeighboursFinder.cxx"
62 #include "AliHLTTPCCANeighboursCleaner.cxx"
63 #include "AliHLTTPCCAStartHitsFinder.cxx"
64 #include "AliHLTTPCCAStartHitsSorter.cxx"
65 #include "AliHLTTPCCATrackletConstructor.cxx"
66 #include "AliHLTTPCCASliceOutput.cxx"
68 #include "MemoryAssignmentHelpers.h"
70 #ifndef HLTCA_STANDALONE
71 #include "AliHLTDefinitions.h"
72 #include "AliHLTSystem.h"
75 ClassImp( AliHLTTPCCAGPUTracker )
77 bool AliHLTTPCCAGPUTracker::fgGPUUsed = false;
79 int AliHLTTPCCAGPUTracker::InitGPU(int sliceCount, int forceDeviceID)
81 //Find best CUDA device, initialize and allocate memory
// Initializes the GPU tracker: picks a CUDA device (best-scoring, or
// forceDeviceID if != -1), checks compute capability and memory budget,
// allocates device global memory and page-locked host memory, creates one
// CUDA stream per slice (at least 3), and in standalone builds performs one
// warm-up reconstruction so later timing runs are not skewed by first-use
// overhead. Error/return paths are elided in this view -- presumably
// returns 0 on success, non-zero on failure; TODO confirm in full source.
85 HLTWarning("CUDA already used by another AliHLTTPCCAGPUTracker running in same process");
// Record the initializing thread id; Reconstruct() later rejects calls
// made from a different thread than the one that created the context.
89 fThreadId = (int) syscall (SYS_gettid);
91 cudaDeviceProp fCudaDeviceProp;
// Device-side memory requirement: fixed row + common memory, plus
// per-slice slice-data and global scratch memory.
93 fGPUMemSize = HLTCA_GPU_ROWS_MEMORY + HLTCA_GPU_COMMON_MEMORY + sliceCount * (HLTCA_GPU_SLICE_DATA_MEMORY + HLTCA_GPU_GLOBAL_MEMORY);
95 #ifndef CUDA_DEVICE_EMULATION
96 int count, bestDevice = -1;
97 long long int bestDeviceSpeed = 0, deviceSpeed;
98 if (CudaFailedMsg(cudaGetDeviceCount(&count)))
100 HLTError("Error getting CUDA Device Count");
104 if (fDebugLevel >= 2) HLTInfo("Available CUDA devices:");
105 for (int i = 0;i < count;i++)
// Free-memory query goes through the driver API: a temporary context is
// created on each candidate device solely for cuMemGetInfo, then destroyed.
107 unsigned int free, total;
110 cuDeviceGet(&tmpDevice, i);
111 CUcontext tmpContext;
112 cuCtxCreate(&tmpContext, 0, tmpDevice);
// NOTE(review): 'unsigned int' free/total truncates on devices with more
// than 4 GB -- newer driver API versions take size_t*; confirm the CUDA
// toolkit version this is built against.
113 if(cuMemGetInfo(&free, &total)) std::cout << "Error\n";
114 cuCtxDestroy(tmpContext);
115 CudaFailedMsg(cudaGetDeviceProperties(&fCudaDeviceProp, i));
// A device qualifies if compute capability is in [1.2, 9.0) and it has
// enough free memory for the tracker's fGPUMemSize budget.
117 int deviceOK = fCudaDeviceProp.major < 9 && !(fCudaDeviceProp.major < 1 || (fCudaDeviceProp.major == 1 && fCudaDeviceProp.minor < 2)) && free >= fGPUMemSize;
119 if (fDebugLevel >= 2) HLTInfo("%s%2d: %s (Rev: %d.%d - Mem Avail %d / %d)%s", deviceOK ? " " : "[", i, fCudaDeviceProp.name, fCudaDeviceProp.major, fCudaDeviceProp.minor, free, fCudaDeviceProp.totalGlobalMem, deviceOK ? "" : " ]");
// Heuristic speed score: multiprocessors * clock * warp size * free memory.
// The highest-scoring qualifying device wins.
120 deviceSpeed = (long long int) fCudaDeviceProp.multiProcessorCount * (long long int) fCudaDeviceProp.clockRate * (long long int) fCudaDeviceProp.warpSize * (long long int) free;
121 if (deviceOK && deviceSpeed > bestDeviceSpeed)
124 bestDeviceSpeed = deviceSpeed;
127 if (bestDevice == -1)
129 HLTWarning("No CUDA Device available, aborting CUDA Initialisation");
// An explicit forceDeviceID overrides the automatic best-device choice.
135 if (forceDeviceID == -1)
136 cudaDevice = bestDevice;
138 cudaDevice = forceDeviceID;
143 cudaGetDeviceProperties(&fCudaDeviceProp ,cudaDevice );
// Dump the full property set of the chosen device when debugging.
145 if (fDebugLevel >= 1)
147 HLTInfo("Using CUDA Device %s with Properties:", fCudaDeviceProp.name);
148 HLTInfo("totalGlobalMem = %d", fCudaDeviceProp.totalGlobalMem);
149 HLTInfo("sharedMemPerBlock = %d", fCudaDeviceProp.sharedMemPerBlock);
150 HLTInfo("regsPerBlock = %d", fCudaDeviceProp.regsPerBlock);
151 HLTInfo("warpSize = %d", fCudaDeviceProp.warpSize);
152 HLTInfo("memPitch = %d", fCudaDeviceProp.memPitch);
153 HLTInfo("maxThreadsPerBlock = %d", fCudaDeviceProp.maxThreadsPerBlock);
154 HLTInfo("maxThreadsDim = %d %d %d", fCudaDeviceProp.maxThreadsDim[0], fCudaDeviceProp.maxThreadsDim[1], fCudaDeviceProp.maxThreadsDim[2]);
155 HLTInfo("maxGridSize = %d %d %d", fCudaDeviceProp.maxGridSize[0], fCudaDeviceProp.maxGridSize[1], fCudaDeviceProp.maxGridSize[2]);
156 HLTInfo("totalConstMem = %d", fCudaDeviceProp.totalConstMem);
157 HLTInfo("major = %d", fCudaDeviceProp.major);
158 HLTInfo("minor = %d", fCudaDeviceProp.minor);
159 HLTInfo("clockRate %d= ", fCudaDeviceProp.clockRate);
160 HLTInfo("textureAlignment %d= ", fCudaDeviceProp.textureAlignment);
// Hard requirement: compute capability 1.2+ (needed for the atomics
// headers included above).
163 if (fCudaDeviceProp.major < 1 || (fCudaDeviceProp.major == 1 && fCudaDeviceProp.minor < 2))
165 HLTError( "Unsupported CUDA Device" );
// Sanity-check the compile-time memory budgets against actual sizes
// before committing to any allocation.
170 if (sizeof(AliHLTTPCCATracker) * sliceCount > HLTCA_GPU_TRACKER_OBJECT_MEMORY)
172 HLTError("Insufficiant Tracker Object Memory");
177 if (CudaFailedMsg(cudaSetDevice(cudaDevice)))
179 HLTError("Could not set CUDA Device!");
184 if (fgkNSlices * AliHLTTPCCATracker::CommonMemorySize() > HLTCA_GPU_COMMON_MEMORY)
186 HLTError("Insufficiant Common Memory");
192 if (fgkNSlices * (HLTCA_ROW_COUNT + 1) * sizeof(AliHLTTPCCARow) > HLTCA_GPU_ROWS_MEMORY)
194 HLTError("Insufficiant Row Memory");
// Single large device allocation; sub-regions are carved out later via
// the *Memory() helper offsets.
200 if (fGPUMemSize > fCudaDeviceProp.totalGlobalMem || CudaFailedMsg(cudaMalloc(&fGPUMemory, (size_t) fGPUMemSize)))
202 HLTError("CUDA Memory Allocation Error");
207 if (fDebugLevel >= 1) HLTInfo("GPU Memory used: %d", (int) fGPUMemSize);
// Host side uses page-locked (pinned) memory so the async memcpys in
// Reconstruct() can overlap with kernel execution.
208 int hostMemSize = HLTCA_GPU_ROWS_MEMORY + HLTCA_GPU_COMMON_MEMORY + sliceCount * (HLTCA_GPU_SLICE_DATA_MEMORY + HLTCA_GPU_TRACKS_MEMORY) + HLTCA_GPU_TRACKER_OBJECT_MEMORY;
209 if (CudaFailedMsg(cudaMallocHost(&fHostLockedMemory, hostMemSize)))
211 cudaFree(fGPUMemory);
213 HLTError("Error allocating Page Locked Host Memory");
217 if (fDebugLevel >= 1) HLTInfo("Host Memory used: %d", hostMemSize);
// Debug builds poison the GPU memory with the byte pattern 143 so
// uninitialized reads are recognizable in dumps.
219 if (fDebugLevel >= 1)
221 CudaFailedMsg(cudaMemset(fGPUMemory, 143, (size_t) fGPUMemSize));
224 fSliceCount = sliceCount;
225 //Don't run constructor / destructor here, this will be just local memcopy of Tracker in GPU Memory
226 fGpuTracker = (AliHLTTPCCATracker*) TrackerMemory(fHostLockedMemory, 0);
// Wire each slave (per-slice) tracker to its region of the pinned host buffer.
228 for (int i = 0;i < fgkNSlices;i++)
230 fSlaveTrackers[i].SetGPUTracker();
231 fSlaveTrackers[i].SetGPUTrackerCommonMemory((char*) CommonMemory(fHostLockedMemory, i));
232 fSlaveTrackers[i].SetGPUSliceDataMemory(SliceDataMemory(fHostLockedMemory, i), RowMemory(fHostLockedMemory, i));
// At least 3 streams are needed (Reconstruct() uses streams 0-2 for fixed
// roles) even when fewer slices are processed.
235 fpCudaStreams = malloc(CAMath::Max(3, fSliceCount) * sizeof(cudaStream_t));
236 cudaStream_t* const cudaStreams = (cudaStream_t*) fpCudaStreams;
237 for (int i = 0;i < CAMath::Max(3, fSliceCount);i++)
239 if (CudaFailedMsg(cudaStreamCreate(&cudaStreams[i])))
241 cudaFree(fGPUMemory);
242 cudaFreeHost(fHostLockedMemory);
244 HLTError("Error creating CUDA Stream");
250 fCudaInitialized = 1;
// NOTE(review): "%dd" looks like a typo for "%d" (prints a stray 'd' after
// the thread id); "successfull" is also misspelled -- confirm and fix in a
// dedicated change since this is a runtime string.
251 HLTImportant("CUDA Initialisation successfull (Device %d: %s, Thread %dd)", cudaDevice, fCudaDeviceProp.name, fThreadId);
// NOTE(review): '&' works here because defined() yields 0/1, but '&&' is
// the conventional preprocessor operator.
253 #if defined(HLTCA_STANDALONE) & !defined(CUDA_DEVICE_EMULATION)
256 //Do one initial run for Benchmark reasons
// Temporarily suppress debug output during the warm-up pass; restored below.
257 const int useDebugLevel = fDebugLevel;
259 AliHLTTPCCAClusterData* tmpCluster = new AliHLTTPCCAClusterData[sliceCount];
263 AliHLTTPCCAParam tmpParam;
264 AliHLTTPCCASliceOutput::outputControlStruct tmpOutputControl;
// Warm-up input comes from fixed dump files on disk; per-slice settings first.
266 fin.open("events/settings.dump");
269 for (int i = 0;i < sliceCountLocal;i++)
271 fSlaveTrackers[i].SetOutputControl(&tmpOutputControl);
272 tmpParam.ReadSettings(fin);
273 InitializeSliceParam(i, tmpParam);
// ...then one recorded event's clusters for every slice.
277 fin.open("eventspbpbc/event.0.dump", std::ifstream::binary);
278 for (int i = 0;i < sliceCount;i++)
280 tmpCluster[i].StartReading(i, 0);
281 tmpCluster[i].ReadEvent(fin);
282 tmpCluster[i].FinishReading();
286 AliHLTTPCCASliceOutput **tmpOutput = new AliHLTTPCCASliceOutput*[sliceCount];
287 memset(tmpOutput, 0, sliceCount * sizeof(AliHLTTPCCASliceOutput*));
// Full warm-up reconstruction; its output is discarded (cleanup elided here).
289 Reconstruct(tmpOutput, tmpCluster, 0, sliceCount);
290 for (int i = 0;i < sliceCount;i++)
294 fSlaveTrackers[i].SetOutputControl(NULL);
298 fDebugLevel = useDebugLevel;
304 template <class T> inline T* AliHLTTPCCAGPUTracker::alignPointer(T* ptr, int alignment)
306 //Macro to align Pointers.
307 //Will align to start at 1 MB segments, this should be consistent with every alignment in the tracker
308 //(As long as every single data structure is <= 1 MB)
// Rounds 'ptr' up to the next multiple of 'alignment' bytes. The
// already-aligned guard and the return statement are elided in this view;
// presumably the bump only happens when (adr % alignment) != 0 -- confirm.
310 size_t adr = (size_t) ptr;
313 adr += alignment - (adr % alignment);
318 bool AliHLTTPCCAGPUTracker::CudaFailedMsg(cudaError_t error)
320 //Check for CUDA Error and in the case of an error display the corresponding error string
// Returns false when 'error' is cudaSuccess so callers can wrap any CUDA
// API call; on failure it logs the numeric code plus the human-readable
// string and (in the elided tail) presumably returns true -- confirm.
321 if (error == cudaSuccess) return(false);
322 HLTWarning("CUDA Error: %d / %s", error, cudaGetErrorString(error));
326 int AliHLTTPCCAGPUTracker::CUDASync(char* state)
328 //Wait for CUDA-Kernel to finish and check for CUDA errors afterwards
// 'state' is a label naming the kernel/phase, used only in error messages.
// With debugging disabled this is a no-op so kernel launches stay fully
// asynchronous in production.
330 if (fDebugLevel == 0) return(0);
// First pick up launch-time errors recorded at kernel invocation
// (bad configuration etc.)...
332 cuErr = cudaGetLastError();
333 if (cuErr != cudaSuccess)
335 HLTError("Cuda Error %s while invoking kernel (%s)", cudaGetErrorString(cuErr), state);
// ...then synchronize to surface asynchronous execution errors.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize() in newer CUDA toolkits.
338 if (CudaFailedMsg(cudaThreadSynchronize()))
340 HLTError("CUDA Error while synchronizing (%s)", state);
343 if (fDebugLevel >= 3) HLTInfo("CUDA Sync Done");
347 void AliHLTTPCCAGPUTracker::SetDebugLevel(const int dwLevel, std::ostream* const NewOutFile)
349 //Set Debug Level and Debug output File if applicable
// dwLevel: new debug verbosity (0 disables the sync/dump paths elsewhere).
// NewOutFile: optional debug dump stream; the current one is kept when NULL.
350 fDebugLevel = dwLevel;
351 if (NewOutFile) fOutFile = NewOutFile;
354 int AliHLTTPCCAGPUTracker::SetGPUTrackerOption(char* OptionName, int /*OptionValue*/)
356 //Set a specific GPU Tracker Option
// No options are currently recognized: any name falls through to the error
// log below (the return statements are elided in this view).
358 HLTError("Unknown Option: %s", OptionName);
361 //No Options used at the moment
365 #ifdef HLTCA_STANDALONE
366 void AliHLTTPCCAGPUTracker::StandalonePerfTime(int iSlice, int i)
368 //Run Performance Query for timer i of slice iSlice
// Samples a high-resolution timestamp into the slice's PerfTimer(i) slot,
// but only when debugging is enabled, to keep the production path cheap.
369 if (fDebugLevel >= 1)
371 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime( fSlaveTrackers[iSlice].PerfTimer(i));
// Non-standalone (online/HLT) builds compile performance timing to a no-op.
375 void AliHLTTPCCAGPUTracker::StandalonePerfTime(int /*iSlice*/, int /*i*/) {}
378 void AliHLTTPCCAGPUTracker::DumpRowBlocks(AliHLTTPCCATracker* tracker, int iSlice, bool check)
380 //Dump Rowblocks to File
// Debug-only (fDebugLevel >= 4): copies the tracklet-constructor scheduler
// state (RowBlockPos, RowBlockTracklets, per-block starting tracklets and
// the common memory) from the GPU to temporary host buffers and writes it
// to *fOutFile. When 'check' is set it additionally verifies that phase-1
// tracklet ids form the expected consecutive sequence starting at
// fScheduleFirstDynamicTracklet.
381 if (fDebugLevel >= 4)
383 *fOutFile << "RowBlock Tracklets" << std::endl;
// Host-side staging buffers, sized like their GPU counterparts.
385 int4* rowBlockPos = (int4*) malloc(sizeof(int4) * (tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1) * 2);
386 int* rowBlockTracklets = (int*) malloc(sizeof(int) * (tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1) * HLTCA_GPU_MAX_TRACKLETS * 2);
387 uint2* blockStartingTracklet = (uint2*) malloc(sizeof(uint2) * HLTCA_GPU_BLOCK_COUNT);
388 CudaFailedMsg(cudaMemcpy(rowBlockPos, fGpuTracker[iSlice].RowBlockPos(), sizeof(int4) * (tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1) * 2, cudaMemcpyDeviceToHost));
389 CudaFailedMsg(cudaMemcpy(rowBlockTracklets, fGpuTracker[iSlice].RowBlockTracklets(), sizeof(int) * (tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1) * HLTCA_GPU_MAX_TRACKLETS * 2, cudaMemcpyDeviceToHost));
390 CudaFailedMsg(cudaMemcpy(blockStartingTracklet, fGpuTracker[iSlice].BlockStartingTracklet(), sizeof(uint2) * HLTCA_GPU_BLOCK_COUNT, cudaMemcpyDeviceToHost));
391 CudaFailedMsg(cudaMemcpy(tracker[iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemorySize(), cudaMemcpyDeviceToHost));
// 'k' tracks the next expected tracklet id for the consecutive-sequence check.
393 int k = tracker[iSlice].GPUParameters()->fScheduleFirstDynamicTracklet;
394 for (int i = 0; i < tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1;i++)
// Each row block stores "up" and "down" halves; the down half lives at
// offset (NRows / HLTCA_GPU_SCHED_ROW_STEP + 1) in rowBlockPos.
396 *fOutFile << "Rowblock: " << i << ", up " << rowBlockPos[i].y << "/" << rowBlockPos[i].x << ", down " <<
397 rowBlockPos[tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1 + i].y << "/" << rowBlockPos[tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1 + i].x << std::endl << "Phase 1: ";
398 for (int j = 0;j < rowBlockPos[i].x;j++)
400 //Use Tracker Object to calculate Offset instead of fGpuTracker, since *fNTracklets of fGpuTracker points to GPU Mem!
401 *fOutFile << rowBlockTracklets[(tracker[iSlice].RowBlockTracklets(0, i) - tracker[iSlice].RowBlockTracklets(0, 0)) + j] << ", ";
402 if (check && rowBlockTracklets[(tracker[iSlice].RowBlockTracklets(0, i) - tracker[iSlice].RowBlockTracklets(0, 0)) + j] != k)
404 HLTError("Wrong starting Row Block %d, entry %d, is %d, should be %d", i, j, rowBlockTracklets[(tracker[iSlice].RowBlockTracklets(0, i) - tracker[iSlice].RowBlockTracklets(0, 0)) + j], k);
// -1 is the "empty slot" marker written by PreInitRowBlocks; seeing it
// inside a populated range indicates scheduler corruption.
407 if (rowBlockTracklets[(tracker[iSlice].RowBlockTracklets(0, i) - tracker[iSlice].RowBlockTracklets(0, 0)) + j] == -1)
409 HLTError("Error, -1 Tracklet found");
412 *fOutFile << std::endl << "Phase 2: ";
413 for (int j = 0;j < rowBlockPos[tracker[iSlice].Param().NRows() / HLTCA_GPU_SCHED_ROW_STEP + 1 + i].x;j++)
415 *fOutFile << rowBlockTracklets[(tracker[iSlice].RowBlockTracklets(1, i) - tracker[iSlice].RowBlockTracklets(0, 0)) + j] << ", ";
417 *fOutFile << std::endl;
422 *fOutFile << "Starting Threads: (First Dynamic: " << tracker[iSlice].GPUParameters()->fScheduleFirstDynamicTracklet << ")" << std::endl;
423 for (int i = 0;i < HLTCA_GPU_BLOCK_COUNT;i++)
425 *fOutFile << i << ": " << blockStartingTracklet[i].x << " - " << blockStartingTracklet[i].y << std::endl;
// NOTE(review): free(rowBlockPos) is not visible in this view -- verify the
// buffer allocated at the top is released, otherwise this leaks per call.
430 free(rowBlockTracklets);
431 free(blockStartingTracklet);
435 __global__ void PreInitRowBlocks(int4* const RowBlockPos, int* const RowBlockTracklets, int* const SliceDataHitWeights, int nSliceDataHits)
437 //Initialize GPU RowBlocks and HitWeights
// Kernel precondition: all three buffers must be int4 (16-byte) aligned and
// sized in whole int4 units, since initialization is done with vectorized
// int4 stores. Launched as PreInitRowBlocks<<<30, 256[, 0, stream]>>> by the
// host code; the grid-stride loops below make correctness independent of
// the actual launch configuration.
438 int4* const rowBlockTracklets4 = (int4*) RowBlockTracklets;
439 int4* const sliceDataHitWeights4 = (int4*) SliceDataHitWeights;
440 const int stride = blockDim.x * gridDim.x;
// i0 = all zeros (fill value for RowBlockPos and hit weights),
// i1 = all -1 ("empty slot" marker for RowBlockTracklets). Their
// declarations (int4 i0, i1;) are elided in this view.
442 i0.x = i0.y = i0.z = i0.w = 0;
443 i1.x = i1.y = i1.z = i1.w = -1;
// Zero both halves (up/down) of the RowBlockPos array; the loop body
// writing rowBlockPos is elided in this view.
444 for (int i = blockIdx.x * blockDim.x + threadIdx.x;i < sizeof(int4) * 2 * (HLTCA_ROW_COUNT / HLTCA_GPU_SCHED_ROW_STEP + 1) / sizeof(int4);i += stride)
// Mark every tracklet slot in every row block as empty (-1).
446 for (int i = blockIdx.x * blockDim.x + threadIdx.x;i < sizeof(int) * (HLTCA_ROW_COUNT / HLTCA_GPU_SCHED_ROW_STEP + 1) * HLTCA_GPU_MAX_TRACKLETS * 2 / sizeof(int4);i += stride)
447 rowBlockTracklets4[i] = i1;
// Clear the per-hit weights used for hit-assignment arbitration.
448 for (int i = blockIdx.x * blockDim.x + threadIdx.x;i < nSliceDataHits * sizeof(int) / sizeof(int4);i += stride)
449 sliceDataHitWeights4[i] = i0;
452 int AliHLTTPCCAGPUTracker::Reconstruct(AliHLTTPCCASliceOutput** pOutput, AliHLTTPCCAClusterData* pClusterData, int firstSlice, int sliceCountLocal)
454 //Primary reconstruction function
455 cudaStream_t* const cudaStreams = (cudaStream_t*) fpCudaStreams;
457 if (sliceCountLocal == -1) sliceCountLocal = fSliceCount;
459 if (!fCudaInitialized)
461 HLTError("GPUTracker not initialized");
464 if (sliceCountLocal > fSliceCount)
466 HLTError("GPU Tracker was initialized to run with %d slices but was called to process %d slices", fSliceCount, sliceCountLocal);
469 if (fThreadId != (int) syscall (SYS_gettid))
471 HLTError("GPUTracker context was initialized by different thread, Initializing Thread: %d, Processing Thread: %d", fThreadId, (int) syscall (SYS_gettid));
475 if (fDebugLevel >= 2) HLTInfo("Running GPU Tracker (Slices %d to %d)", fSlaveTrackers[firstSlice].Param().ISlice(), fSlaveTrackers[firstSlice].Param().ISlice() + sliceCountLocal);
477 if (sliceCountLocal * sizeof(AliHLTTPCCATracker) > HLTCA_GPU_TRACKER_CONSTANT_MEM)
479 HLTError("Insuffissant constant memory (Required %d, Available %d, Tracker %d, Param %d, SliceData %d)", sliceCountLocal * (int) sizeof(AliHLTTPCCATracker), (int) HLTCA_GPU_TRACKER_CONSTANT_MEM, (int) sizeof(AliHLTTPCCATracker), (int) sizeof(AliHLTTPCCAParam), (int) sizeof(AliHLTTPCCASliceData));
483 if (fDebugLevel >= 4)
485 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
487 *fOutFile << std::endl << std::endl << "Slice: " << fSlaveTrackers[firstSlice + iSlice].Param().ISlice() << std::endl;
491 memcpy(fGpuTracker, &fSlaveTrackers[firstSlice], sizeof(AliHLTTPCCATracker) * sliceCountLocal);
493 if (fDebugLevel >= 3) HLTInfo("Allocating GPU Tracker memory and initializing constants");
495 #ifdef HLTCA_GPU_TIME_PROFILE
496 unsigned __int64 a, b, c, d;
497 AliHLTTPCCAStandaloneFramework::StandaloneQueryFreq(&c);
498 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime(&d);
501 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
503 //Make this a GPU Tracker
504 fGpuTracker[iSlice].SetGPUTracker();
505 fGpuTracker[iSlice].SetGPUTrackerCommonMemory((char*) CommonMemory(fGPUMemory, iSlice));
506 fGpuTracker[iSlice].SetGPUSliceDataMemory(SliceDataMemory(fGPUMemory, iSlice), RowMemory(fGPUMemory, iSlice));
507 fGpuTracker[iSlice].SetPointersSliceData(&pClusterData[iSlice], false);
509 //Set Pointers to GPU Memory
510 char* tmpMem = (char*) GlobalMemory(fGPUMemory, iSlice);
512 if (fDebugLevel >= 3) HLTInfo("Initialising GPU Hits Memory");
513 tmpMem = fGpuTracker[iSlice].SetGPUTrackerHitsMemory(tmpMem, pClusterData[iSlice].NumberOfClusters());
514 tmpMem = alignPointer(tmpMem, 1024 * 1024);
516 if (fDebugLevel >= 3) HLTInfo("Initialising GPU Tracklet Memory");
517 tmpMem = fGpuTracker[iSlice].SetGPUTrackerTrackletsMemory(tmpMem, HLTCA_GPU_MAX_TRACKLETS /* *fSlaveTrackers[firstSlice + iSlice].NTracklets()*/);
518 tmpMem = alignPointer(tmpMem, 1024 * 1024);
520 if (fDebugLevel >= 3) HLTInfo("Initialising GPU Track Memory");
521 tmpMem = fGpuTracker[iSlice].SetGPUTrackerTracksMemory(tmpMem, HLTCA_GPU_MAX_TRACKS /* *fSlaveTrackers[firstSlice + iSlice].NTracklets()*/, pClusterData[iSlice].NumberOfClusters());
522 tmpMem = alignPointer(tmpMem, 1024 * 1024);
524 if (fGpuTracker[iSlice].TrackMemorySize() >= HLTCA_GPU_TRACKS_MEMORY)
526 HLTError("Insufficiant Track Memory");
530 if (tmpMem - (char*) GlobalMemory(fGPUMemory, iSlice) > HLTCA_GPU_GLOBAL_MEMORY)
532 HLTError("Insufficiant Global Memory");
536 //Initialize Startup Constants
537 *fSlaveTrackers[firstSlice + iSlice].NTracklets() = 0;
538 *fSlaveTrackers[firstSlice + iSlice].NTracks() = 0;
539 *fSlaveTrackers[firstSlice + iSlice].NTrackHits() = 0;
540 fGpuTracker[iSlice].GPUParametersConst()->fGPUFixedBlockCount = HLTCA_GPU_BLOCK_COUNT * (iSlice + 1) / sliceCountLocal - HLTCA_GPU_BLOCK_COUNT * (iSlice) / sliceCountLocal;
541 if (fDebugLevel >= 3) HLTInfo("Blocks for Slice %d: %d", iSlice, fGpuTracker[iSlice].GPUParametersConst()->fGPUFixedBlockCount);
542 fGpuTracker[iSlice].GPUParametersConst()->fGPUiSlice = iSlice;
543 fGpuTracker[iSlice].GPUParametersConst()->fGPUnSlices = sliceCountLocal;
544 fSlaveTrackers[firstSlice + iSlice].GPUParameters()->fGPUError = 0;
545 fGpuTracker[iSlice].SetGPUTextureBase(fGpuTracker[0].Data().Memory());
548 #ifdef HLTCA_GPU_TEXTURE_FETCH
549 cudaChannelFormatDesc channelDescu2 = cudaCreateChannelDesc<ushort2>();
551 if (CudaFailedMsg(cudaBindTexture(&offset, &gAliTexRefu2, fGpuTracker[0].Data().Memory(), &channelDescu2, sliceCountLocal * HLTCA_GPU_SLICE_DATA_MEMORY)) || offset)
553 HLTError("Error binding CUDA Texture ushort2 (Offset %d)", (int) offset);
556 cudaChannelFormatDesc channelDescu = cudaCreateChannelDesc<unsigned short>();
557 if (CudaFailedMsg(cudaBindTexture(&offset, &gAliTexRefu, fGpuTracker[0].Data().Memory(), &channelDescu, sliceCountLocal * HLTCA_GPU_SLICE_DATA_MEMORY)) || offset)
559 HLTError("Error binding CUDA Texture ushort (Offset %d)", (int) offset);
562 cudaChannelFormatDesc channelDescs = cudaCreateChannelDesc<signed short>();
563 if (CudaFailedMsg(cudaBindTexture(&offset, &gAliTexRefs, fGpuTracker[0].Data().Memory(), &channelDescs, sliceCountLocal * HLTCA_GPU_SLICE_DATA_MEMORY)) || offset)
565 HLTError("Error binding CUDA Texture short (Offset %d)", (int) offset);
570 //Copy Tracker Object to GPU Memory
571 if (fDebugLevel >= 3) HLTInfo("Copying Tracker objects to GPU");
572 #ifdef HLTCA_GPU_TRACKLET_CONSTRUCTOR_DO_PROFILE
573 if (CudaFailedMsg(cudaMalloc(&fGpuTracker[0].fStageAtSync, 100000000))) return(1);
574 CudaFailedMsg(cudaMemset(fGpuTracker[0].fStageAtSync, 0, 100000000));
576 CudaFailedMsg(cudaMemcpyToSymbolAsync(gAliHLTTPCCATracker, fGpuTracker, sizeof(AliHLTTPCCATracker) * sliceCountLocal, 0, cudaMemcpyHostToDevice, cudaStreams[0]));
578 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
580 StandalonePerfTime(firstSlice + iSlice, 0);
582 //Initialize GPU Slave Tracker
583 if (fDebugLevel >= 3) HLTInfo("Creating Slice Data");
584 fSlaveTrackers[firstSlice + iSlice].SetGPUSliceDataMemory(SliceDataMemory(fHostLockedMemory, iSlice), RowMemory(fHostLockedMemory, firstSlice + iSlice));
585 #ifdef HLTCA_GPU_TIME_PROFILE
586 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime(&a);
588 fSlaveTrackers[firstSlice + iSlice].ReadEvent(&pClusterData[iSlice]);
589 #ifdef HLTCA_GPU_TIME_PROFILE
590 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime(&b);
591 printf("Read %f %f\n", ((double) b - (double) a) / (double) c, ((double) a - (double) d) / (double) c);
593 if (fSlaveTrackers[firstSlice + iSlice].Data().MemorySize() > HLTCA_GPU_SLICE_DATA_MEMORY)
595 HLTError("Insufficiant Slice Data Memory");
599 /*if (fSlaveTrackers[firstSlice + iSlice].CheckEmptySlice())
601 if (fDebugLevel >= 3) HLTInfo("Slice Empty, not running GPU Tracker");
602 if (sliceCountLocal == 1)
606 //Initialize temporary memory where needed
607 if (fDebugLevel >= 3) HLTInfo("Copying Slice Data to GPU and initializing temporary memory");
608 PreInitRowBlocks<<<30, 256, 0, cudaStreams[2]>>>(fGpuTracker[iSlice].RowBlockPos(), fGpuTracker[iSlice].RowBlockTracklets(), fGpuTracker[iSlice].Data().HitWeights(), fSlaveTrackers[firstSlice + iSlice].Data().NumberOfHitsPlusAlign());
610 //Copy Data to GPU Global Memory
611 CudaFailedMsg(cudaMemcpyAsync(fGpuTracker[iSlice].CommonMemory(), fSlaveTrackers[firstSlice + iSlice].CommonMemory(), fSlaveTrackers[firstSlice + iSlice].CommonMemorySize(), cudaMemcpyHostToDevice, cudaStreams[iSlice & 1]));
612 CudaFailedMsg(cudaMemcpyAsync(fGpuTracker[iSlice].Data().Memory(), fSlaveTrackers[firstSlice + iSlice].Data().Memory(), fSlaveTrackers[firstSlice + iSlice].Data().GpuMemorySize(), cudaMemcpyHostToDevice, cudaStreams[iSlice & 1]));
613 CudaFailedMsg(cudaMemcpyAsync(fGpuTracker[iSlice].SliceDataRows(), fSlaveTrackers[firstSlice + iSlice].SliceDataRows(), (HLTCA_ROW_COUNT + 1) * sizeof(AliHLTTPCCARow), cudaMemcpyHostToDevice, cudaStreams[iSlice & 1]));
615 if (fDebugLevel >= 4)
617 if (fDebugLevel >= 5) HLTInfo("Allocating Debug Output Memory");
618 fSlaveTrackers[firstSlice + iSlice].TrackletMemory() = reinterpret_cast<char*> ( new uint4 [ fGpuTracker[iSlice].TrackletMemorySize()/sizeof( uint4 ) + 100] );
619 fSlaveTrackers[firstSlice + iSlice].SetPointersTracklets( HLTCA_GPU_MAX_TRACKLETS );
620 fSlaveTrackers[firstSlice + iSlice].HitMemory() = reinterpret_cast<char*> ( new uint4 [ fGpuTracker[iSlice].HitMemorySize()/sizeof( uint4 ) + 100] );
621 fSlaveTrackers[firstSlice + iSlice].SetPointersHits( pClusterData[iSlice].NumberOfClusters() );
624 if (CUDASync("Initialization")) return(1);
625 StandalonePerfTime(firstSlice + iSlice, 1);
627 if (fDebugLevel >= 3) HLTInfo("Running GPU Neighbours Finder");
628 AliHLTTPCCAProcess<AliHLTTPCCANeighboursFinder> <<<fSlaveTrackers[firstSlice + iSlice].Param().NRows(), 256, 0, cudaStreams[iSlice & 1]>>>(iSlice);
630 if (CUDASync("Neighbours finder")) return 1;
632 StandalonePerfTime(firstSlice + iSlice, 2);
634 if (fDebugLevel >= 4)
636 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].Data().Memory(), fGpuTracker[iSlice].Data().Memory(), fSlaveTrackers[firstSlice + iSlice].Data().GpuMemorySize(), cudaMemcpyDeviceToHost));
637 fSlaveTrackers[firstSlice + iSlice].DumpLinks(*fOutFile);
640 if (fDebugLevel >= 3) HLTInfo("Running GPU Neighbours Cleaner");
641 AliHLTTPCCAProcess<AliHLTTPCCANeighboursCleaner> <<<fSlaveTrackers[firstSlice + iSlice].Param().NRows()-2, 256, 0, cudaStreams[iSlice & 1]>>>(iSlice);
642 if (CUDASync("Neighbours Cleaner")) return 1;
644 StandalonePerfTime(firstSlice + iSlice, 3);
646 if (fDebugLevel >= 4)
648 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].Data().Memory(), fGpuTracker[iSlice].Data().Memory(), fSlaveTrackers[firstSlice + iSlice].Data().GpuMemorySize(), cudaMemcpyDeviceToHost));
649 fSlaveTrackers[firstSlice + iSlice].DumpLinks(*fOutFile);
652 if (fDebugLevel >= 3) HLTInfo("Running GPU Start Hits Finder");
653 AliHLTTPCCAProcess<AliHLTTPCCAStartHitsFinder> <<<fSlaveTrackers[firstSlice + iSlice].Param().NRows()-6, 256, 0, cudaStreams[iSlice & 1]>>>(iSlice);
654 if (CUDASync("Start Hits Finder")) return 1;
656 StandalonePerfTime(firstSlice + iSlice, 4);
658 if (fDebugLevel >= 3) HLTInfo("Running GPU Start Hits Sorter");
659 AliHLTTPCCAProcess<AliHLTTPCCAStartHitsSorter> <<<30, 256, 0, cudaStreams[iSlice & 1]>>>(iSlice);
660 if (CUDASync("Start Hits Sorter")) return 1;
662 StandalonePerfTime(firstSlice + iSlice, 5);
664 if (fDebugLevel >= 2)
666 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemorySize(), cudaMemcpyDeviceToHost));
667 if (fDebugLevel >= 3) HLTInfo("Obtaining Number of Start Hits from GPU: %d", *fSlaveTrackers[firstSlice + iSlice].NTracklets());
668 if (*fSlaveTrackers[firstSlice + iSlice].NTracklets() > HLTCA_GPU_MAX_TRACKLETS)
670 HLTError("HLTCA_GPU_MAX_TRACKLETS constant insuffisant");
675 if (fDebugLevel >= 4)
677 *fOutFile << "Temporary ";
678 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].TrackletStartHits(), fGpuTracker[iSlice].TrackletTmpStartHits(), pClusterData[iSlice].NumberOfClusters() * sizeof(AliHLTTPCCAHitId), cudaMemcpyDeviceToHost));
679 fSlaveTrackers[firstSlice + iSlice].DumpStartHits(*fOutFile);
680 uint3* tmpMemory = (uint3*) malloc(sizeof(uint3) * fSlaveTrackers[firstSlice + iSlice].Param().NRows());
681 CudaFailedMsg(cudaMemcpy(tmpMemory, fGpuTracker[iSlice].RowStartHitCountOffset(), fSlaveTrackers[firstSlice + iSlice].Param().NRows() * sizeof(uint3), cudaMemcpyDeviceToHost));
682 *fOutFile << "Start Hits Sort Vector:" << std::endl;
683 for (int i = 0;i < fSlaveTrackers[firstSlice + iSlice].Param().NRows();i++)
685 *fOutFile << "Row: " << i << ", Len: " << tmpMemory[i].x << ", Offset: " << tmpMemory[i].y << ", New Offset: " << tmpMemory[i].z << std::endl;
689 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].HitMemory(), fGpuTracker[iSlice].HitMemory(), fSlaveTrackers[firstSlice + iSlice].HitMemorySize(), cudaMemcpyDeviceToHost));
690 fSlaveTrackers[firstSlice + iSlice].DumpStartHits(*fOutFile);
693 StandalonePerfTime(firstSlice + iSlice, 6);
695 fSlaveTrackers[firstSlice + iSlice].SetGPUTrackerTracksMemory((char*) TracksMemory(fHostLockedMemory, iSlice), HLTCA_GPU_MAX_TRACKS, pClusterData[iSlice].NumberOfClusters());
698 StandalonePerfTime(firstSlice, 7);
699 #ifdef HLTCA_GPU_PREFETCHDATA
700 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
702 if (fSlaveTrackers[firstSlice + iSlice].Data().GPUSharedDataReq() * sizeof(ushort_v) > ALIHLTTPCCATRACKLET_CONSTRUCTOR_TEMP_MEM / 4 * sizeof(uint4))
704 HLTError("Insufficiant GPU shared Memory, required: %d, available %d", fSlaveTrackers[firstSlice + iSlice].Data().GPUSharedDataReq() * sizeof(ushort_v), ALIHLTTPCCATRACKLET_CONSTRUCTOR_TEMP_MEM / 4 * sizeof(uint4));
707 if (fDebugLevel >= 1)
709 static int infoShown = 0;
712 HLTInfo("GPU Shared Memory Cache Size: %d", 2 * fSlaveTrackers[firstSlice + iSlice].Data().GPUSharedDataReq() * sizeof(ushort_v));
719 int nHardCollisions = 0;
721 RestartTrackletConstructor:
722 if (fDebugLevel >= 3) HLTInfo("Initialising Tracklet Constructor Scheduler");
723 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
725 AliHLTTPCCATrackletConstructorInit<<<HLTCA_GPU_MAX_TRACKLETS /* *fSlaveTrackers[firstSlice + iSlice].NTracklets() */ / HLTCA_GPU_THREAD_COUNT + 1, HLTCA_GPU_THREAD_COUNT>>>(iSlice);
726 if (CUDASync("Tracklet Initializer")) return 1;
727 DumpRowBlocks(fSlaveTrackers, iSlice);
730 if (fDebugLevel >= 3) HLTInfo("Running GPU Tracklet Constructor");
731 AliHLTTPCCATrackletConstructorNewGPU<<<HLTCA_GPU_BLOCK_COUNT, HLTCA_GPU_THREAD_COUNT>>>();
732 if (CUDASync("Tracklet Constructor (new)")) return 1;
734 StandalonePerfTime(firstSlice, 8);
736 if (fDebugLevel >= 4)
738 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice++)
740 DumpRowBlocks(&fSlaveTrackers[firstSlice], iSlice, false);
741 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemory(), fGpuTracker[iSlice].CommonMemorySize(), cudaMemcpyDeviceToHost));
742 if (fDebugLevel >= 5)
744 HLTInfo("Obtained %d tracklets", *fSlaveTrackers[firstSlice + iSlice].NTracklets());
746 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].TrackletMemory(), fGpuTracker[iSlice].TrackletMemory(), fGpuTracker[iSlice].TrackletMemorySize(), cudaMemcpyDeviceToHost));
747 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].HitMemory(), fGpuTracker[iSlice].HitMemory(), fGpuTracker[iSlice].HitMemorySize(), cudaMemcpyDeviceToHost));
748 fSlaveTrackers[firstSlice + iSlice].DumpTrackletHits(*fOutFile);
753 for (int iSlice = 0;iSlice < sliceCountLocal;iSlice += runSlices)
755 if (runSlices < HLTCA_GPU_TRACKLET_SELECTOR_SLICE_COUNT) runSlices++;
756 if (fDebugLevel >= 3) HLTInfo("Running HLT Tracklet selector (Slice %d to %d)", iSlice, iSlice + runSlices);
757 AliHLTTPCCAProcessMulti<AliHLTTPCCATrackletSelector><<<HLTCA_GPU_BLOCK_COUNT, HLTCA_GPU_THREAD_COUNT, 0, cudaStreams[iSlice]>>>(iSlice, CAMath::Min(runSlices, sliceCountLocal - iSlice));
759 if (CUDASync("Tracklet Selector")) return 1;
760 StandalonePerfTime(firstSlice, 9);
762 CudaFailedMsg(cudaMemcpyAsync(fSlaveTrackers[firstSlice + 0].CommonMemory(), fGpuTracker[0].CommonMemory(), fGpuTracker[0].CommonMemorySize(), cudaMemcpyDeviceToHost, cudaStreams[0]));
763 for (int iSliceTmp = 0;iSliceTmp <= sliceCountLocal;iSliceTmp++)
765 if (iSliceTmp < sliceCountLocal)
767 int iSlice = iSliceTmp;
768 if (fDebugLevel >= 3) HLTInfo("Transfering Tracks from GPU to Host");
769 cudaStreamSynchronize(cudaStreams[iSlice]);
770 CudaFailedMsg(cudaMemcpyAsync(fSlaveTrackers[firstSlice + iSlice].Tracks(), fGpuTracker[iSlice].Tracks(), sizeof(AliHLTTPCCATrack) * *fSlaveTrackers[firstSlice + iSlice].NTracks(), cudaMemcpyDeviceToHost, cudaStreams[iSlice]));
771 CudaFailedMsg(cudaMemcpyAsync(fSlaveTrackers[firstSlice + iSlice].TrackHits(), fGpuTracker[iSlice].TrackHits(), sizeof(AliHLTTPCCAHitId) * *fSlaveTrackers[firstSlice + iSlice].NTrackHits(), cudaMemcpyDeviceToHost, cudaStreams[iSlice]));
772 if (iSlice + 1 < sliceCountLocal)
773 CudaFailedMsg(cudaMemcpyAsync(fSlaveTrackers[firstSlice + iSlice + 1].CommonMemory(), fGpuTracker[iSlice + 1].CommonMemory(), fGpuTracker[iSlice + 1].CommonMemorySize(), cudaMemcpyDeviceToHost, cudaStreams[iSlice + 1]));
778 int iSlice = iSliceTmp - 1;
779 cudaStreamSynchronize(cudaStreams[iSlice]);
781 if (fDebugLevel >= 4)
783 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + iSlice].Data().HitWeights(), fGpuTracker[iSlice].Data().HitWeights(), fSlaveTrackers[firstSlice + iSlice].Data().NumberOfHitsPlusAlign() * sizeof(int), cudaMemcpyDeviceToHost));
784 fSlaveTrackers[firstSlice + iSlice].DumpHitWeights(*fOutFile);
785 fSlaveTrackers[firstSlice + iSlice].DumpTrackHits(*fOutFile);
788 if (fSlaveTrackers[firstSlice + iSlice].GPUParameters()->fGPUError)
790 if (fSlaveTrackers[firstSlice + iSlice].GPUParameters()->fGPUError == HLTCA_GPU_ERROR_SCHEDULE_COLLISION && nHardCollisions++ < 10)
792 HLTWarning("Hard scheduling collision occured, rerunning Tracklet Constructor");
793 for (int i = 0;i < sliceCountLocal;i++)
795 cudaThreadSynchronize();
796 CudaFailedMsg(cudaMemcpy(fSlaveTrackers[firstSlice + i].CommonMemory(), fGpuTracker[i].CommonMemory(), fGpuTracker[i].CommonMemorySize(), cudaMemcpyDeviceToHost));
797 *fSlaveTrackers[firstSlice + i].NTracks() = 0;
798 *fSlaveTrackers[firstSlice + i].NTrackHits() = 0;
799 fSlaveTrackers[firstSlice + i].GPUParameters()->fGPUError = HLTCA_GPU_ERROR_NONE;
800 CudaFailedMsg(cudaMemcpy(fGpuTracker[i].CommonMemory(), fSlaveTrackers[firstSlice + i].CommonMemory(), fGpuTracker[i].CommonMemorySize(), cudaMemcpyHostToDevice));
801 PreInitRowBlocks<<<30, 256>>>(fGpuTracker[i].RowBlockPos(), fGpuTracker[i].RowBlockTracklets(), fGpuTracker[i].Data().HitWeights(), fSlaveTrackers[firstSlice + i].Data().NumberOfHitsPlusAlign());
803 goto RestartTrackletConstructor;
805 HLTError("GPU Tracker returned Error Code %d", fSlaveTrackers[firstSlice + iSlice].GPUParameters()->fGPUError);
808 if (fDebugLevel >= 3) HLTInfo("Tracks Transfered: %d / %d", *fSlaveTrackers[firstSlice + iSlice].NTracks(), *fSlaveTrackers[firstSlice + iSlice].NTrackHits());
810 fSlaveTrackers[firstSlice + iSlice].SetOutput(&pOutput[iSlice]);
811 #ifdef HLTCA_GPU_TIME_PROFILE
812 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime(&a);
814 fSlaveTrackers[firstSlice + iSlice].WriteOutput();
815 #ifdef HLTCA_GPU_TIME_PROFILE
816 AliHLTTPCCAStandaloneFramework::StandaloneQueryTime(&b);
817 printf("Write %f %f\n", ((double) b - (double) a) / (double) c, ((double) a - (double) d) / (double) c);
820 if (fDebugLevel >= 4)
822 delete[] fSlaveTrackers[firstSlice + iSlice].HitMemory();
823 delete[] fSlaveTrackers[firstSlice + iSlice].TrackletMemory();
828 StandalonePerfTime(firstSlice, 10);
830 if (fDebugLevel >= 3) HLTInfo("GPU Reconstruction finished");
832 #ifdef HLTCA_GPU_TRACKLET_CONSTRUCTOR_DO_PROFILE
833 char* stageAtSync = (char*) malloc(100000000);
834 CudaFailedMsg(cudaMemcpy(stageAtSync, fGpuTracker[0].fStageAtSync, 100 * 1000 * 1000, cudaMemcpyDeviceToHost));
835 cudaFree(fGpuTracker[0].fStageAtSync);
837 FILE* fp = fopen("profile.txt", "w+");
838 FILE* fp2 = fopen("profile.bmp", "w+b");
839 int nEmptySync = 0, fEmpty;
841 const int bmpheight = 1000;
842 BITMAPFILEHEADER bmpFH;
843 BITMAPINFOHEADER bmpIH;
844 ZeroMemory(&bmpFH, sizeof(bmpFH));
845 ZeroMemory(&bmpIH, sizeof(bmpIH));
847 bmpFH.bfType = 19778; //"BM"
848 bmpFH.bfSize = sizeof(bmpFH) + sizeof(bmpIH) + (HLTCA_GPU_BLOCK_COUNT * HLTCA_GPU_THREAD_COUNT / 32 * 33 - 1) * bmpheight ;
849 bmpFH.bfOffBits = sizeof(bmpFH) + sizeof(bmpIH);
851 bmpIH.biSize = sizeof(bmpIH);
852 bmpIH.biWidth = HLTCA_GPU_BLOCK_COUNT * HLTCA_GPU_THREAD_COUNT / 32 * 33 - 1;
853 bmpIH.biHeight = bmpheight;
855 bmpIH.biBitCount = 32;
857 fwrite(&bmpFH, 1, sizeof(bmpFH), fp2);
858 fwrite(&bmpIH, 1, sizeof(bmpIH), fp2);
860 for (int i = 0;i < bmpheight * HLTCA_GPU_BLOCK_COUNT * HLTCA_GPU_THREAD_COUNT;i += HLTCA_GPU_BLOCK_COUNT * HLTCA_GPU_THREAD_COUNT)
863 for (int j = 0;j < HLTCA_GPU_BLOCK_COUNT * HLTCA_GPU_THREAD_COUNT;j++)
865 fprintf(fp, "%d\t", stageAtSync[i + j]);
867 if (stageAtSync[i + j] == 1) color = RGB(255, 0, 0);
868 if (stageAtSync[i + j] == 2) color = RGB(0, 255, 0);
869 if (stageAtSync[i + j] == 3) color = RGB(0, 0, 255);
870 if (stageAtSync[i + j] == 4) color = RGB(255, 255, 0);
871 fwrite(&color, 1, sizeof(int), fp2);
872 if (j > 0 && j % 32 == 0)
874 color = RGB(255, 255, 255);
875 fwrite(&color, 1, 4, fp2);
877 if (stageAtSync[i + j]) fEmpty = 0;
880 if (fEmpty) nEmptySync++;
882 //if (nEmptySync == HLTCA_GPU_SCHED_ROW_STEP + 2) break;
// Initialize the host-side ("slave") tracker for one slice with the given
// slice parameters.
//
// iSlice - slice index into fSlaveTrackers.
// param  - slice tracker parameters forwarded to AliHLTTPCCATracker::Initialize.
//
// Returns 0 on success, 1 if the row count of the initialized tracker does not
// match the compile-time constant HLTCA_ROW_COUNT (the GPU code sizes its
// per-row buffers with that constant, so a mismatch cannot be tolerated).
//
// NOTE(review): the parameter was garbled to "¶m" by an HTML-entity
// mis-decoding of "&param"; restored to a reference parameter here.
int AliHLTTPCCAGPUTracker::InitializeSliceParam(int iSlice, AliHLTTPCCAParam &param)
{
	//Initialize Slice Tracker Parameter for a slave tracker
	fSlaveTrackers[iSlice].Initialize(param);
	if (fSlaveTrackers[iSlice].Param().NRows() != HLTCA_ROW_COUNT)
	{
		HLTError("Error, Slice Tracker %d Row Count of %d exceeds Constant of %d", iSlice, fSlaveTrackers[iSlice].Param().NRows(), HLTCA_ROW_COUNT);
		return(1);
	}
	return(0);
}
// Release all GPU resources acquired during initialization and tear down the
// CUDA context.
//
// Returns 0 on success, 1 if the CUDA context could not be shut down.
int AliHLTTPCCAGPUTracker::ExitGPU()
{
	// Make sure no kernel or asynchronous copy still uses the buffers that
	// are freed below.
	cudaThreadSynchronize();

	// Free the device memory block. CudaFailedMsg logs a failure but lets
	// teardown continue (previously the cudaError_t result was discarded).
	CudaFailedMsg(cudaFree(fGPUMemory));

	if (fHostLockedMemory)
	{
		// One CUDA stream per slice (at least 3, matching the creation
		// loop) - destroy them before releasing the page-locked memory
		// they operate on.
		for (int i = 0;i < CAMath::Max(3, fSliceCount);i++)
		{
			cudaStreamDestroy(((cudaStream_t*) fpCudaStreams)[i]);
		}
		// Presumably fpCudaStreams was heap-allocated at init time -
		// TODO(review): confirm the allocation site before relying on this.
		free(fpCudaStreams);
		CudaFailedMsg(cudaFreeHost(fHostLockedMemory));
	}

	if (CudaFailedMsg(cudaThreadExit()))
	{
		HLTError("Could not uninitialize GPU");
		return(1);
	}
	HLTInfo("CUDA Uninitialized");
	fCudaInitialized = 0;
	return(0);
}
936 void AliHLTTPCCAGPUTracker::SetOutputControl( AliHLTTPCCASliceOutput::outputControlStruct* val)
938 fOutputControl = val;
939 for (int i = 0;i < fgkNSlices;i++)
941 fSlaveTrackers[i].SetOutputControl(val);