| //===----------------------------------------------------------------------===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is dual licensed under the MIT and the University of Illinois Open |
| // Source Licenses. See LICENSE.txt for details. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| |
// Forward declarations: the following two functions are declared as friends in
// offload_engine.h, and Clang does not accept a static declaration that comes
// after the friend declaration.
| static void __offload_init_library_once(void); |
| static void __offload_fini_library(void); |
| |
| #include "offload_host.h" |
| #ifdef MYO_SUPPORT |
| #include "offload_myo_host.h" |
| #endif |
| |
| #include <malloc.h> |
| #ifndef TARGET_WINNT |
| #include <alloca.h> |
| #include <elf.h> |
| #endif // TARGET_WINNT |
| #include <errno.h> |
| #include <fcntl.h> |
| #include <stdlib.h> |
| #include <string.h> |
| #include <sys/stat.h> |
| #include <sys/types.h> |
| |
| #include <algorithm> |
| #include <bitset> |
| |
| #if defined(HOST_WINNT) |
| #define PATH_SEPARATOR ";" |
| #else |
| #define PATH_SEPARATOR ":" |
| #endif |
| |
| #define GET_OFFLOAD_NUMBER(timer_data) \ |
| timer_data? timer_data->offload_number : 0 |
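// GET_OFFLOAD_NUMBER yields the offload number recorded in the per-offload
// timer data, or 0 when no timer data is attached yet.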
| |
| #ifdef TARGET_WINNT |
// Small subset of ELF declarations needed to compile this file on Windows.
// The ELF header is used to determine which binary type the target image
// contains - a shared library or an executable.
| |
| typedef uint16_t Elf64_Half; |
| typedef uint32_t Elf64_Word; |
| typedef uint64_t Elf64_Addr; |
| typedef uint64_t Elf64_Off; |
| |
| #define EI_NIDENT 16 |
| |
| #define ET_EXEC 2 |
| #define ET_DYN 3 |
| |
| typedef struct |
| { |
| unsigned char e_ident[EI_NIDENT]; |
| Elf64_Half e_type; |
| Elf64_Half e_machine; |
| Elf64_Word e_version; |
| Elf64_Addr e_entry; |
| Elf64_Off e_phoff; |
| Elf64_Off e_shoff; |
| Elf64_Word e_flags; |
| Elf64_Half e_ehsize; |
| Elf64_Half e_phentsize; |
| Elf64_Half e_phnum; |
| Elf64_Half e_shentsize; |
| Elf64_Half e_shnum; |
| Elf64_Half e_shstrndx; |
| } Elf64_Ehdr; |
| #endif // TARGET_WINNT |
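
// Illustrative sketch only (the actual check lives in the image registration
// code, outside this excerpt); 'image_data' is a hypothetical name here:
//
//     const Elf64_Ehdr *hdr = static_cast<const Elf64_Ehdr*>(image_data);
//     bool is_library    = (hdr->e_type == ET_DYN);   // shared library
//     bool is_executable = (hdr->e_type == ET_EXEC);  // target executable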
| |
| // Host console and file logging |
| const char *prefix; |
| int console_enabled = 0; |
| int offload_number = 0; |
| |
| static const char *htrace_envname = "H_TRACE"; |
| static const char *offload_report_envname = "OFFLOAD_REPORT"; |
static const char *timer_envname = "H_TIME";
| |
| // Trace information |
| static const char* vardesc_direction_as_string[] = { |
| "NOCOPY", |
| "IN", |
| "OUT", |
| "INOUT" |
| }; |
| static const char* vardesc_type_as_string[] = { |
| "unknown", |
| "data", |
| "data_ptr", |
| "func_ptr", |
| "void_ptr", |
| "string_ptr", |
| "dv", |
| "dv_data", |
| "dv_data_slice", |
| "dv_ptr", |
| "dv_ptr_data", |
| "dv_ptr_data_slice", |
| "cean_var", |
| "cean_var_ptr", |
| "c_data_ptr_array", |
| "c_func_ptr_array", |
| "c_void_ptr_array", |
| "c_string_ptr_array" |
| }; |
| |
| Engine* mic_engines = 0; |
| uint32_t mic_engines_total = 0; |
| pthread_key_t mic_thread_key; |
| MicEnvVar mic_env_vars; |
| uint64_t cpu_frequency = 0; |
| |
| // MIC_STACKSIZE |
| uint32_t mic_stack_size = 12 * 1024 * 1024; |
| |
| // MIC_BUFFERSIZE |
| uint64_t mic_buffer_size = 0; |
| |
| // MIC_LD_LIBRARY_PATH |
| char* mic_library_path = 0; |
| |
| // MIC_PROXY_IO |
| bool mic_proxy_io = true; |
| |
| // MIC_PROXY_FS_ROOT |
| char* mic_proxy_fs_root = 0; |
| |
// Threshold for creating buffers with large pages. A buffer is created with
// the large-page hint if its size exceeds this threshold value.
// Large pages are currently disabled by default (the threshold defaults to
// UINT64_MAX) due to HSD 4114629.
| uint64_t __offload_use_2mb_buffers = 0xffffffffffffffffULL; |
| static const char *mic_use_2mb_buffers_envname = |
| "MIC_USE_2MB_BUFFERS"; |
| |
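// How the threshold is consumed below (see alloc_ptr_data and
// offload_stack_memory_manager), shown as a condensed sketch:
//
//     uint32_t buffer_flags = 0;
//     if (length >= __offload_use_2mb_buffers) {
//         buffer_flags = COI_OPTIMIZE_HUGE_PAGE_SIZE;
//     }
//
// With the UINT64_MAX default the branch never fires, so large pages remain
// disabled unless MIC_USE_2MB_BUFFERS lowers the threshold.
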
| static uint64_t __offload_use_async_buffer_write = 2 * 1024 * 1024; |
| static const char *mic_use_async_buffer_write_envname = |
| "MIC_USE_ASYNC_BUFFER_WRITE"; |
| |
| static uint64_t __offload_use_async_buffer_read = 2 * 1024 * 1024; |
| static const char *mic_use_async_buffer_read_envname = |
| "MIC_USE_ASYNC_BUFFER_READ"; |
| |
| // device initialization type |
| OffloadInitType __offload_init_type = c_init_on_offload_all; |
| static const char *offload_init_envname = "OFFLOAD_INIT"; |
| |
| // active wait |
| static bool __offload_active_wait = true; |
| static const char *offload_active_wait_envname = "OFFLOAD_ACTIVE_WAIT"; |
| |
| // OMP_DEFAULT_DEVICE |
| int __omp_device_num = 0; |
| static const char *omp_device_num_envname = "OMP_DEFAULT_DEVICE"; |
| |
| // The list of pending target libraries |
| static bool __target_libs; |
| static TargetImageList __target_libs_list; |
| static mutex_t __target_libs_lock; |
| static mutex_t stack_alloc_lock; |
| |
| // Target executable |
| TargetImage* __target_exe; |
| |
| static char * offload_get_src_base(void * ptr, uint8_t type) |
| { |
| char *base; |
| if (VAR_TYPE_IS_PTR(type)) { |
| base = *static_cast<char**>(ptr); |
| } |
| else if (VAR_TYPE_IS_SCALAR(type)) { |
| base = static_cast<char*>(ptr); |
| } |
| else if (VAR_TYPE_IS_DV_DATA_SLICE(type) || VAR_TYPE_IS_DV_DATA(type)) { |
| ArrDesc *dvp; |
| if (VAR_TYPE_IS_DV_DATA_SLICE(type)) { |
| const arr_desc *ap = static_cast<const arr_desc*>(ptr); |
| dvp = (type == c_dv_data_slice) ? |
| reinterpret_cast<ArrDesc*>(ap->base) : |
| *reinterpret_cast<ArrDesc**>(ap->base); |
| } |
| else { |
| dvp = (type == c_dv_data) ? |
| static_cast<ArrDesc*>(ptr) : |
| *static_cast<ArrDesc**>(ptr); |
| } |
| base = reinterpret_cast<char*>(dvp->Base); |
| } |
| else { |
| base = NULL; |
| } |
| return base; |
| } |
| |
| void OffloadDescriptor::report_coi_error(error_types msg, COIRESULT res) |
| { |
| // special case for the 'process died' error |
| if (res == COI_PROCESS_DIED) { |
| m_device.fini_process(true); |
| } |
| else { |
| switch (msg) { |
| case c_buf_create: |
| if (res == COI_OUT_OF_MEMORY) { |
| msg = c_buf_create_out_of_mem; |
| } |
| /* fallthru */ |
| |
| case c_buf_create_from_mem: |
| case c_buf_get_address: |
| case c_pipeline_create: |
| case c_pipeline_run_func: |
| LIBOFFLOAD_ERROR(msg, m_device.get_logical_index(), res); |
| break; |
| |
| case c_buf_read: |
| case c_buf_write: |
| case c_buf_copy: |
| case c_buf_map: |
| case c_buf_unmap: |
| case c_buf_destroy: |
| case c_buf_set_state: |
| LIBOFFLOAD_ERROR(msg, res); |
| break; |
| |
| default: |
| break; |
| } |
| } |
| |
| exit(1); |
| } |
| |
| _Offload_result OffloadDescriptor::translate_coi_error(COIRESULT res) const |
| { |
| switch (res) { |
| case COI_SUCCESS: |
| return OFFLOAD_SUCCESS; |
| |
| case COI_PROCESS_DIED: |
| return OFFLOAD_PROCESS_DIED; |
| |
| case COI_OUT_OF_MEMORY: |
| return OFFLOAD_OUT_OF_MEMORY; |
| |
| default: |
| return OFFLOAD_ERROR; |
| } |
| } |
| |
| bool OffloadDescriptor::alloc_ptr_data( |
| PtrData* &ptr_data, |
| void *base, |
| int64_t disp, |
| int64_t size, |
| int64_t alloc_disp, |
| int align |
| ) |
| { |
| // total length of base |
| int64_t length = disp + size; |
| bool is_new; |
| |
| OFFLOAD_TRACE(3, "Creating association for data: addr %p, length %lld\n", |
| base, length); |
| |
| // add new entry |
| ptr_data = m_device.insert_ptr_data(base, length, is_new); |
| if (is_new) { |
| |
| OFFLOAD_TRACE(3, "Added new association\n"); |
| |
| if (length > 0) { |
| OffloadTimer timer(get_timer_data(), c_offload_host_alloc_buffers); |
| COIRESULT res; |
| |
| // align should be a power of 2 |
| if (align > 0 && (align & (align - 1)) == 0) { |
                // offset within mic_buffer. The offset optimization can be
                // applied only when the source address alignment satisfies
                // the alignment requested on the target (cq172736).
| if ((reinterpret_cast<intptr_t>(base) & (align - 1)) == 0) { |
| ptr_data->mic_offset = reinterpret_cast<intptr_t>(base) & 4095; |
| } |
| } |
| |
| // buffer size and flags |
| uint64_t buffer_size = length + ptr_data->mic_offset; |
| uint32_t buffer_flags = 0; |
| |
| // create buffer with large pages if data length exceeds |
| // large page threshold |
| if (length >= __offload_use_2mb_buffers) { |
| buffer_flags = COI_OPTIMIZE_HUGE_PAGE_SIZE; |
| } |
| |
| // create CPU buffer |
| OFFLOAD_DEBUG_TRACE_1(3, |
| GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_create_buf_host, |
| "Creating buffer from source memory %p, " |
| "length %lld\n", base, length); |
| |
            // The result is deliberately not checked because we can continue
            // without a CPU buffer; in that case COIBufferRead/Write is used
            // instead of COIBufferCopy.
| COI::BufferCreateFromMemory(length, |
| COI_BUFFER_NORMAL, |
| 0, |
| base, |
| 1, |
| &m_device.get_process(), |
| &ptr_data->cpu_buf); |
| |
| OFFLOAD_DEBUG_TRACE_1(3, |
| GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_create_buf_mic, |
| "Creating buffer for sink: size %lld, offset %d, " |
| "flags =0x%x\n", buffer_size - alloc_disp, |
| ptr_data->mic_offset, buffer_flags); |
| |
| // create MIC buffer |
| res = COI::BufferCreate(buffer_size - alloc_disp, |
| COI_BUFFER_NORMAL, |
| buffer_flags, |
| 0, |
| 1, |
| &m_device.get_process(), |
| &ptr_data->mic_buf); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_create, res); |
| } |
| ptr_data->alloc_ptr_data_lock.unlock(); |
| return false; |
| } |
| |
| // make buffer valid on the device. |
| res = COI::BufferSetState(ptr_data->mic_buf, |
| m_device.get_process(), |
| COI_BUFFER_VALID, |
| COI_BUFFER_NO_MOVE, |
| 0, 0, 0); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_set_state, res); |
| } |
| ptr_data->alloc_ptr_data_lock.unlock(); |
| return false; |
| } |
| |
| res = COI::BufferSetState(ptr_data->mic_buf, |
| COI_PROCESS_SOURCE, |
| COI_BUFFER_INVALID, |
| COI_BUFFER_NO_MOVE, |
| 0, 0, 0); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_set_state, res); |
| } |
| ptr_data->alloc_ptr_data_lock.unlock(); |
| return false; |
| } |
| } |
| |
| ptr_data->alloc_disp = alloc_disp; |
| ptr_data->alloc_ptr_data_lock.unlock(); |
| } |
| else { |
| mutex_locker_t locker(ptr_data->alloc_ptr_data_lock); |
| |
| OFFLOAD_TRACE(3, "Found existing association: addr %p, length %lld, " |
| "is_static %d\n", |
| ptr_data->cpu_addr.start(), ptr_data->cpu_addr.length(), |
| ptr_data->is_static); |
| |
| // This is not a new entry. Make sure that provided address range fits |
| // into existing one. |
| MemRange addr_range(base, length - ptr_data->alloc_disp); |
| if (!ptr_data->cpu_addr.contains(addr_range)) { |
| LIBOFFLOAD_ERROR(c_bad_ptr_mem_range); |
| exit(1); |
| } |
| |
| // if the entry is associated with static data it may not have buffers |
| // created because they are created on demand. |
| if (ptr_data->is_static && !init_static_ptr_data(ptr_data)) { |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| bool OffloadDescriptor::find_ptr_data( |
| PtrData* &ptr_data, |
| void *base, |
| int64_t disp, |
| int64_t size, |
| bool report_error |
| ) |
| { |
| // total length of base |
| int64_t length = disp + size; |
| |
| OFFLOAD_TRACE(3, "Looking for association for data: addr %p, " |
| "length %lld\n", base, length); |
| |
| // find existing association in pointer table |
| ptr_data = m_device.find_ptr_data(base); |
| if (ptr_data == 0) { |
| if (report_error) { |
| LIBOFFLOAD_ERROR(c_no_ptr_data, base); |
| exit(1); |
| } |
| OFFLOAD_TRACE(3, "Association does not exist\n"); |
| return true; |
| } |
| |
| OFFLOAD_TRACE(3, "Found association: base %p, length %lld, is_static %d\n", |
| ptr_data->cpu_addr.start(), ptr_data->cpu_addr.length(), |
| ptr_data->is_static); |
| |
| // make sure that provided address range fits into existing one |
| MemRange addr_range(base, length); |
| if (!ptr_data->cpu_addr.contains(addr_range)) { |
| if (report_error) { |
| LIBOFFLOAD_ERROR(c_bad_ptr_mem_range); |
| exit(1); |
| } |
| OFFLOAD_TRACE(3, "Existing association partially overlaps with " |
| "data address range\n"); |
| ptr_data = 0; |
| return true; |
| } |
| |
| // if the entry is associated with static data it may not have buffers |
| // created because they are created on demand. |
| if (ptr_data->is_static && !init_static_ptr_data(ptr_data)) { |
| return false; |
| } |
| |
| return true; |
| } |
| |
| bool OffloadDescriptor::init_static_ptr_data(PtrData *ptr_data) |
| { |
| OffloadTimer timer(get_timer_data(), c_offload_host_alloc_buffers); |
| |
| if (ptr_data->cpu_buf == 0) { |
| OFFLOAD_TRACE(3, "Creating buffer from source memory %llx\n", |
| ptr_data->cpu_addr.start()); |
| |
| COIRESULT res = COI::BufferCreateFromMemory( |
| ptr_data->cpu_addr.length(), |
| COI_BUFFER_NORMAL, |
| 0, |
| const_cast<void*>(ptr_data->cpu_addr.start()), |
| 1, &m_device.get_process(), |
| &ptr_data->cpu_buf); |
| |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_create_from_mem, res); |
| } |
| } |
| |
| if (ptr_data->mic_buf == 0) { |
| OFFLOAD_TRACE(3, "Creating buffer from sink memory %llx\n", |
| ptr_data->mic_addr); |
| |
| COIRESULT res = COI::BufferCreateFromMemory( |
| ptr_data->cpu_addr.length(), |
| COI_BUFFER_NORMAL, |
| COI_SINK_MEMORY, |
| reinterpret_cast<void*>(ptr_data->mic_addr), |
| 1, &m_device.get_process(), |
| &ptr_data->mic_buf); |
| |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_create_from_mem, res); |
| } |
| } |
| |
| return true; |
| } |
| |
| bool OffloadDescriptor::init_mic_address(PtrData *ptr_data) |
| { |
| if (ptr_data->mic_buf != 0 && ptr_data->mic_addr == 0) { |
| COIRESULT res = COI::BufferGetSinkAddress(ptr_data->mic_buf, |
| &ptr_data->mic_addr); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_get_address, res); |
| } |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| bool OffloadDescriptor::nullify_target_stack( |
| COIBUFFER targ_buf, |
| uint64_t size |
| ) |
| { |
| char * ptr = (char*)malloc(size); |
| COIRESULT res; |
| |
| memset(ptr, 0, size); |
| res = COI::BufferWrite( |
| targ_buf, |
| 0, |
| ptr, |
| size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, 0); |
| free(ptr); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_write, res); |
| } |
| return true; |
| } |
| |
| bool OffloadDescriptor::offload_stack_memory_manager( |
| const void * stack_begin, |
| int routine_id, |
| int buf_size, |
| int align, |
| bool *is_new) |
| { |
| mutex_locker_t locker(stack_alloc_lock); |
| |
| PersistData * new_el; |
| PersistDataList::iterator it_begin = m_device.m_persist_list.begin(); |
| PersistDataList::iterator it_end; |
| int erase = 0; |
| |
| *is_new = false; |
| |
| for (PersistDataList::iterator it = m_device.m_persist_list.begin(); |
| it != m_device.m_persist_list.end(); it++) { |
| PersistData cur_el = *it; |
| |
| if (stack_begin > it->stack_cpu_addr) { |
| // this stack data must be destroyed |
| m_destroy_stack.push_front(cur_el.stack_ptr_data); |
| it_end = it; |
| erase++; |
| } |
| else if (stack_begin == it->stack_cpu_addr) { |
            if (routine_id != it->routine_id) {
| // this stack data must be destroyed |
| m_destroy_stack.push_front(cur_el.stack_ptr_data); |
| it_end = it; |
| erase++; |
| break; |
| } |
| else { |
| // stack data is reused |
| m_stack_ptr_data = it->stack_ptr_data; |
| if (erase > 0) { |
| // all obsolete stack sections must be erased from the list |
| m_device.m_persist_list.erase(it_begin, ++it_end); |
| |
| m_in_datalen += |
| erase * sizeof(new_el->stack_ptr_data->mic_addr); |
| } |
| OFFLOAD_TRACE(3, "Reuse of stack buffer with addr %p\n", |
| m_stack_ptr_data->mic_addr); |
| return true; |
| } |
| } |
| else if (stack_begin < it->stack_cpu_addr) { |
| break; |
| } |
| } |
| |
| if (erase > 0) { |
| // all obsolete stack sections must be erased from the list |
| m_device.m_persist_list.erase(it_begin, ++it_end); |
| m_in_datalen += erase * sizeof(new_el->stack_ptr_data->mic_addr); |
| } |
    // a new stack persistence entry is created
| new_el = new PersistData(stack_begin, routine_id, buf_size); |
| // create MIC buffer |
| COIRESULT res; |
| uint32_t buffer_flags = 0; |
| |
| // create buffer with large pages if data length exceeds |
| // large page threshold |
| if (buf_size >= __offload_use_2mb_buffers) { |
| buffer_flags = COI_OPTIMIZE_HUGE_PAGE_SIZE; |
| } |
| res = COI::BufferCreate(buf_size, |
| COI_BUFFER_NORMAL, |
| buffer_flags, |
| 0, |
| 1, |
| &m_device.get_process(), |
| &new_el->stack_ptr_data->mic_buf); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_create, res); |
| } |
| return false; |
| } |
| // make buffer valid on the device. |
| res = COI::BufferSetState(new_el->stack_ptr_data->mic_buf, |
| m_device.get_process(), |
| COI_BUFFER_VALID, |
| COI_BUFFER_NO_MOVE, |
| 0, 0, 0); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_set_state, res); |
| } |
| return false; |
| } |
| res = COI::BufferSetState(new_el->stack_ptr_data->mic_buf, |
| COI_PROCESS_SOURCE, |
| COI_BUFFER_INVALID, |
| COI_BUFFER_NO_MOVE, |
| 0, 0, 0); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| } |
| else if (m_is_mandatory) { |
| report_coi_error(c_buf_set_state, res); |
| } |
| return false; |
| } |
    // the persistence algorithm requires the target stack to be initially nullified
| if (!nullify_target_stack(new_el->stack_ptr_data->mic_buf, buf_size)) { |
| return false; |
| } |
| |
| m_stack_ptr_data = new_el->stack_ptr_data; |
| init_mic_address(m_stack_ptr_data); |
| OFFLOAD_TRACE(3, "Allocating stack buffer with addr %p\n", |
| m_stack_ptr_data->mic_addr); |
| m_device.m_persist_list.push_front(*new_el); |
| init_mic_address(new_el->stack_ptr_data); |
| *is_new = true; |
| return true; |
| } |
| |
| bool OffloadDescriptor::setup_descriptors( |
| VarDesc *vars, |
| VarDesc2 *vars2, |
| int vars_total, |
| int entry_id, |
| const void *stack_addr |
| ) |
| { |
| COIRESULT res; |
| |
| OffloadTimer timer(get_timer_data(), c_offload_host_setup_buffers); |
| |
| // make a copy of variable descriptors |
| m_vars_total = vars_total; |
| if (vars_total > 0) { |
| m_vars = (VarDesc*) malloc(m_vars_total * sizeof(VarDesc)); |
| memcpy(m_vars, vars, m_vars_total * sizeof(VarDesc)); |
| m_vars_extra = (VarExtra*) malloc(m_vars_total * sizeof(VarExtra)); |
| } |
| |
| // dependencies |
| m_in_deps = (COIEVENT*) malloc(sizeof(COIEVENT) * (m_vars_total + 1)); |
| if (m_vars_total > 0) { |
| m_out_deps = (COIEVENT*) malloc(sizeof(COIEVENT) * m_vars_total); |
| } |
| |
| // copyin/copyout data length |
| m_in_datalen = 0; |
| m_out_datalen = 0; |
| |
| // First pass over variable descriptors |
| // - Calculate size of the input and output non-pointer data |
| // - Allocate buffers for input and output pointers |
| for (int i = 0; i < m_vars_total; i++) { |
| void* alloc_base = NULL; |
| int64_t alloc_disp = 0; |
| int64_t alloc_size; |
| bool src_is_for_mic = (m_vars[i].direction.out || |
| m_vars[i].into == NULL); |
| |
| const char *var_sname = ""; |
| if (vars2 != NULL && i < vars_total) { |
| if (vars2[i].sname != NULL) { |
| var_sname = vars2[i].sname; |
| } |
| } |
| OFFLOAD_TRACE(2, " VarDesc %d, var=%s, %s, %s\n", |
| i, var_sname, |
| vardesc_direction_as_string[m_vars[i].direction.bits], |
| vardesc_type_as_string[m_vars[i].type.src]); |
| if (vars2 != NULL && i < vars_total && vars2[i].dname != NULL) { |
| OFFLOAD_TRACE(2, " into=%s, %s\n", vars2[i].dname, |
| vardesc_type_as_string[m_vars[i].type.dst]); |
| } |
| OFFLOAD_TRACE(2, |
| " type_src=%d, type_dstn=%d, direction=%d, " |
| "alloc_if=%d, free_if=%d, align=%d, mic_offset=%d, flags=0x%x, " |
| "offset=%lld, size=%lld, count/disp=%lld, ptr=%p, into=%p\n", |
| m_vars[i].type.src, |
| m_vars[i].type.dst, |
| m_vars[i].direction.bits, |
| m_vars[i].alloc_if, |
| m_vars[i].free_if, |
| m_vars[i].align, |
| m_vars[i].mic_offset, |
| m_vars[i].flags.bits, |
| m_vars[i].offset, |
| m_vars[i].size, |
| m_vars[i].count, |
| m_vars[i].ptr, |
| m_vars[i].into); |
| |
| if (m_vars[i].alloc != NULL) { |
| // array descriptor |
| const arr_desc *ap = |
| static_cast<const arr_desc*>(m_vars[i].alloc); |
| |
| // debug dump |
| __arr_desc_dump(" ", "ALLOC", ap, 0); |
| |
| __arr_data_offset_and_length(ap, alloc_disp, alloc_size); |
| |
| alloc_base = reinterpret_cast<void*>(ap->base); |
| } |
| |
| m_vars_extra[i].cpu_disp = 0; |
| m_vars_extra[i].cpu_offset = 0; |
| m_vars_extra[i].src_data = 0; |
| m_vars_extra[i].read_rng_src = 0; |
| m_vars_extra[i].read_rng_dst = 0; |
| // flag is_arr_ptr_el is 1 only for var_descs generated |
| // for c_data_ptr_array type |
| if (i < vars_total) { |
| m_vars_extra[i].is_arr_ptr_el = 0; |
| } |
| |
| switch (m_vars[i].type.src) { |
| case c_data_ptr_array: |
| { |
| const arr_desc *ap; |
| const VarDesc3 *vd3 = |
| static_cast<const VarDesc3*>(m_vars[i].ptr); |
| int flags = vd3->array_fields; |
| OFFLOAD_TRACE(2, |
| " pointer array flags = %04x\n", flags); |
| OFFLOAD_TRACE(2, |
| " pointer array type is %s\n", |
| vardesc_type_as_string[flags & 0x3f]); |
| ap = static_cast<const arr_desc*>(vd3->ptr_array); |
| __arr_desc_dump(" ", "ptr array", ap, 0); |
| if (m_vars[i].into) { |
| ap = static_cast<const arr_desc*>(m_vars[i].into); |
| __arr_desc_dump( |
| " ", "into array", ap, 0); |
| } |
| if ((flags & (1<<flag_align_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->align_array); |
| __arr_desc_dump( |
| " ", "align array", ap, 0); |
| } |
| if ((flags & (1<<flag_alloc_if_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->alloc_if_array); |
| __arr_desc_dump( |
| " ", "alloc_if array", ap, 0); |
| } |
| if ((flags & (1<<flag_free_if_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->free_if_array); |
| __arr_desc_dump( |
| " ", "free_if array", ap, 0); |
| } |
| if ((flags & (1<<flag_extent_start_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->extent_start); |
| __arr_desc_dump( |
| " ", "extent_start array", ap, 0); |
| } else if ((flags & |
| (1<<flag_extent_start_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " extent_start scalar = %d\n", |
| (int64_t)vd3->extent_start); |
| } |
| if ((flags & (1<<flag_extent_elements_is_array)) != 0) { |
| ap = static_cast<const arr_desc*> |
| (vd3->extent_elements); |
| __arr_desc_dump( |
| " ", "extent_elements array", ap, 0); |
| } else if ((flags & |
| (1<<flag_extent_elements_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " extent_elements scalar = %d\n", |
| (int64_t)vd3->extent_elements); |
| } |
| if ((flags & (1<<flag_into_start_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->into_start); |
| __arr_desc_dump( |
| " ", "into_start array", ap, 0); |
| } else if ((flags & |
| (1<<flag_into_start_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " into_start scalar = %d\n", |
| (int64_t)vd3->into_start); |
| } |
| if ((flags & (1<<flag_into_elements_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->into_elements); |
| __arr_desc_dump( |
| " ", "into_elements array", ap, 0); |
| } else if ((flags & |
| (1<<flag_into_elements_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " into_elements scalar = %d\n", |
| (int64_t)vd3->into_elements); |
| } |
| if ((flags & (1<<flag_alloc_start_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->alloc_start); |
| __arr_desc_dump( |
| " ", "alloc_start array", ap, 0); |
| } else if ((flags & |
| (1<<flag_alloc_start_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " alloc_start scalar = %d\n", |
| (int64_t)vd3->alloc_start); |
| } |
| if ((flags & (1<<flag_alloc_elements_is_array)) != 0) { |
| ap = static_cast<const arr_desc*>(vd3->alloc_elements); |
| __arr_desc_dump( |
| " ", "alloc_elements array", ap, 0); |
| } else if ((flags & |
| (1<<flag_alloc_elements_is_scalar)) != 0) { |
| OFFLOAD_TRACE(2, |
| " alloc_elements scalar = %d\n", |
| (int64_t)vd3->alloc_elements); |
| } |
| } |
| if (!gen_var_descs_for_pointer_array(i)) { |
| return false; |
| } |
| break; |
| |
| case c_data: |
| case c_void_ptr: |
| case c_cean_var: |
            // In all later uses:
            //   VarDesc.size holds the length of the data to be transferred,
            //   VarDesc.disp holds the offset from the base.
| if (m_vars[i].type.src == c_cean_var) { |
| // array descriptor |
| const arr_desc *ap = |
| static_cast<const arr_desc*>(m_vars[i].ptr); |
| |
| // debug dump |
| __arr_desc_dump("", "IN/OUT", ap, 0); |
| |
| // offset and length are derived from the array descriptor |
| __arr_data_offset_and_length(ap, m_vars[i].disp, |
| m_vars[i].size); |
| if (!is_arr_desc_contiguous(ap)) { |
| m_vars[i].flags.is_noncont_src = 1; |
| m_vars_extra[i].read_rng_src = |
| init_read_ranges_arr_desc(ap); |
| } |
| // all necessary information about length and offset is |
| // transferred in var descriptor. There is no need to send |
| // array descriptor to the target side. |
| m_vars[i].ptr = reinterpret_cast<void*>(ap->base); |
| } |
| else { |
| m_vars[i].size *= m_vars[i].count; |
| m_vars[i].disp = 0; |
| } |
| |
| if (m_vars[i].direction.bits) { |
| // make sure that transfer size > 0 |
| if (m_vars[i].size <= 0) { |
| LIBOFFLOAD_ERROR(c_zero_or_neg_transfer_size); |
| exit(1); |
| } |
| |
| if (m_vars[i].flags.is_static) { |
| PtrData *ptr_data; |
| |
| // find data associated with variable |
| if (!find_ptr_data(ptr_data, |
| m_vars[i].ptr, |
| m_vars[i].disp, |
| m_vars[i].size, |
| false)) { |
| return false; |
| } |
| |
| if (ptr_data != 0) { |
| // offset to base from the beginning of the buffer |
| // memory |
| m_vars[i].offset = |
| (char*) m_vars[i].ptr - |
| (char*) ptr_data->cpu_addr.start(); |
| } |
| else { |
| m_vars[i].flags.is_static = false; |
| if (m_vars[i].into == NULL) { |
| m_vars[i].flags.is_static_dstn = false; |
| } |
| } |
| m_vars_extra[i].src_data = ptr_data; |
| } |
| |
| if (m_is_openmp) { |
| if (m_vars[i].flags.is_static) { |
| // Static data is transferred only by omp target |
| // update construct which passes zeros for |
| // alloc_if and free_if. |
| if (m_vars[i].alloc_if || m_vars[i].free_if) { |
| m_vars[i].direction.bits = c_parameter_nocopy; |
| } |
| } |
| else { |
| AutoData *auto_data; |
| if (m_vars[i].alloc_if) { |
| auto_data = m_device.insert_auto_data( |
| m_vars[i].ptr, m_vars[i].size); |
| auto_data->add_reference(); |
| } |
| else { |
| // TODO: what should be done if var is not in |
| // the table? |
| auto_data = m_device.find_auto_data( |
| m_vars[i].ptr); |
| } |
| |
| // For automatic variables data is transferred |
| // only if alloc_if == 0 && free_if == 0 |
| // or reference count is 1 |
| if ((m_vars[i].alloc_if || m_vars[i].free_if) && |
| auto_data != 0 && |
| auto_data->get_reference() != 1) { |
| m_vars[i].direction.bits = c_parameter_nocopy; |
| } |
| |
| // save data for later use |
| m_vars_extra[i].auto_data = auto_data; |
| } |
| } |
| |
| if (m_vars[i].direction.in && |
| !m_vars[i].flags.is_static) { |
| m_in_datalen += m_vars[i].size; |
| |
                    // for a non-static target destination defined as a CEAN
                    // expression we pass its size and disp to the target
| if (m_vars[i].into == NULL && |
| m_vars[i].type.src == c_cean_var) { |
| m_in_datalen += 2 * sizeof(uint64_t); |
| } |
| m_need_runfunction = true; |
| } |
| if (m_vars[i].direction.out && |
| !m_vars[i].flags.is_static) { |
| m_out_datalen += m_vars[i].size; |
| m_need_runfunction = true; |
| } |
| } |
| break; |
| |
| case c_dv: |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| ArrDesc *dvp = static_cast<ArrDesc*>(m_vars[i].ptr); |
| |
| // debug dump |
| __dv_desc_dump("IN/OUT", dvp); |
| |
| // send dope vector contents excluding base |
| m_in_datalen += m_vars[i].size - sizeof(uint64_t); |
| m_need_runfunction = true; |
| } |
| break; |
| |
| case c_string_ptr: |
| if ((m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) && |
| m_vars[i].size == 0) { |
| m_vars[i].size = 1; |
| m_vars[i].count = |
| strlen(*static_cast<char**>(m_vars[i].ptr)) + 1; |
| } |
| /* fallthru */ |
| |
| case c_data_ptr: |
| if (m_vars[i].flags.is_stack_buf && |
| !m_vars[i].direction.bits && |
| m_vars[i].alloc_if) { |
| // this var_desc is for stack buffer |
| bool is_new; |
| |
| if (!offload_stack_memory_manager( |
| stack_addr, entry_id, |
| m_vars[i].count, m_vars[i].align, &is_new)) { |
| return false; |
| } |
| if (is_new) { |
| m_compute_buffers.push_back( |
| m_stack_ptr_data->mic_buf); |
| m_device.m_persist_list.front().cpu_stack_addr = |
| static_cast<char*>(m_vars[i].ptr); |
| } |
| else { |
| m_vars[i].flags.sink_addr = 1; |
| m_in_datalen += sizeof(m_stack_ptr_data->mic_addr); |
| } |
| m_vars[i].size = m_destroy_stack.size(); |
| m_vars_extra[i].src_data = m_stack_ptr_data; |
| // need to add reference for buffer |
| m_need_runfunction = true; |
| break; |
| } |
| /* fallthru */ |
| |
| case c_cean_var_ptr: |
| case c_dv_ptr: |
| if (m_vars[i].type.src == c_cean_var_ptr) { |
| // array descriptor |
| const arr_desc *ap = |
| static_cast<const arr_desc*>(m_vars[i].ptr); |
| |
| // debug dump |
| __arr_desc_dump("", "IN/OUT", ap, 1); |
| |
| // offset and length are derived from the array descriptor |
| __arr_data_offset_and_length(ap, m_vars[i].disp, |
| m_vars[i].size); |
| |
| if (!is_arr_desc_contiguous(ap)) { |
| m_vars[i].flags.is_noncont_src = 1; |
| m_vars_extra[i].read_rng_src = |
| init_read_ranges_arr_desc(ap); |
| } |
| // all necessary information about length and offset is |
| // transferred in var descriptor. There is no need to send |
| // array descriptor to the target side. |
| m_vars[i].ptr = reinterpret_cast<void*>(ap->base); |
| } |
| else if (m_vars[i].type.src == c_dv_ptr) { |
| // need to send DV to the device unless it is 'nocopy' |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| ArrDesc *dvp = *static_cast<ArrDesc**>(m_vars[i].ptr); |
| |
| // debug dump |
| __dv_desc_dump("IN/OUT", dvp); |
| |
| m_vars[i].direction.bits = c_parameter_in; |
| } |
| |
| // no displacement |
| m_vars[i].disp = 0; |
| } |
| else { |
| // c_data_ptr or c_string_ptr |
| m_vars[i].size *= m_vars[i].count; |
| m_vars[i].disp = 0; |
| } |
| |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| PtrData *ptr_data; |
| |
| // check that buffer length >= 0 |
| if (m_vars[i].alloc_if && |
| m_vars[i].disp + m_vars[i].size < 0) { |
| LIBOFFLOAD_ERROR(c_zero_or_neg_ptr_len); |
| exit(1); |
| } |
| |
| // base address |
| void *base = *static_cast<void**>(m_vars[i].ptr); |
| |
| // allocate buffer if we have no INTO and don't need |
| // allocation for the ptr at target |
| if (src_is_for_mic) { |
| if (m_vars[i].flags.is_stack_buf) { |
| // for stack persistent objects ptr data is created |
| // by var_desc with number 0. |
| // Its ptr_data is stored at m_stack_ptr_data |
| ptr_data = m_stack_ptr_data; |
| m_vars[i].flags.sink_addr = 1; |
| } |
| else if (m_vars[i].alloc_if) { |
| // add new entry |
| if (!alloc_ptr_data( |
| ptr_data, |
| base, |
| (alloc_base != NULL) ? |
| alloc_disp : m_vars[i].disp, |
| (alloc_base != NULL) ? |
| alloc_size : m_vars[i].size, |
| alloc_disp, |
| (alloc_base != NULL) ? |
| 0 : m_vars[i].align)) { |
| return false; |
| } |
| |
| if (ptr_data->add_reference() == 0 && |
| ptr_data->mic_buf != 0) { |
| // add buffer to the list of buffers that |
| // are passed to dispatch call |
| m_compute_buffers.push_back( |
| ptr_data->mic_buf); |
| } |
| else { |
| // will send buffer address to device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (!ptr_data->is_static) { |
| // need to add reference for buffer |
| m_need_runfunction = true; |
| } |
| } |
| else { |
| bool error_if_not_found = true; |
| if (m_is_openmp) { |
| // For omp target update variable is ignored |
| // if it does not exist. |
| if (!m_vars[i].alloc_if && |
| !m_vars[i].free_if) { |
| error_if_not_found = false; |
| } |
| } |
| |
| // use existing association from pointer table |
| if (!find_ptr_data(ptr_data, |
| base, |
| m_vars[i].disp, |
| m_vars[i].size, |
| error_if_not_found)) { |
| return false; |
| } |
| |
| if (m_is_openmp) { |
| // make var nocopy if it does not exist |
| if (ptr_data == 0) { |
| m_vars[i].direction.bits = |
| c_parameter_nocopy; |
| } |
| } |
| |
| if (ptr_data != 0) { |
| m_vars[i].flags.sink_addr = 1; |
| } |
| } |
| |
| if (ptr_data != 0) { |
| if (m_is_openmp) { |
| // data is transferred only if |
| // alloc_if == 0 && free_if == 0 |
| // or reference count is 1 |
| if ((m_vars[i].alloc_if || |
| m_vars[i].free_if) && |
| ptr_data->get_reference() != 1) { |
| m_vars[i].direction.bits = |
| c_parameter_nocopy; |
| } |
| } |
| |
| if (ptr_data->alloc_disp != 0) { |
| m_vars[i].flags.alloc_disp = 1; |
| m_in_datalen += sizeof(alloc_disp); |
| } |
| |
| if (m_vars[i].flags.sink_addr) { |
                        // get the buffer's address on the sink
| if (!init_mic_address(ptr_data)) { |
| return false; |
| } |
| |
| m_in_datalen += sizeof(ptr_data->mic_addr); |
| } |
| |
| if (!ptr_data->is_static && m_vars[i].free_if) { |
| // need to decrement buffer reference on target |
| m_need_runfunction = true; |
| } |
| |
| // offset to base from the beginning of the buffer |
| // memory |
| m_vars[i].offset = (char*) base - |
| (char*) ptr_data->cpu_addr.start(); |
| |
| // copy other pointer properties to var descriptor |
| m_vars[i].mic_offset = ptr_data->mic_offset; |
| m_vars[i].flags.is_static = ptr_data->is_static; |
| } |
| } |
| else { |
| if (!find_ptr_data(ptr_data, |
| base, |
| m_vars[i].disp, |
| m_vars[i].size, |
| false)) { |
| return false; |
| } |
| if (ptr_data) { |
| m_vars[i].offset = |
| (char*) base - |
| (char*) ptr_data->cpu_addr.start(); |
| } |
| } |
| |
| // save pointer data |
| m_vars_extra[i].src_data = ptr_data; |
| } |
| break; |
| |
| case c_func_ptr: |
| if (m_vars[i].direction.in) { |
| m_in_datalen += __offload_funcs.max_name_length(); |
| } |
| if (m_vars[i].direction.out) { |
| m_out_datalen += __offload_funcs.max_name_length(); |
| } |
| m_need_runfunction = true; |
| break; |
| |
| case c_dv_data: |
| case c_dv_ptr_data: |
| case c_dv_data_slice: |
| case c_dv_ptr_data_slice: |
| ArrDesc *dvp; |
| if (VAR_TYPE_IS_DV_DATA_SLICE(m_vars[i].type.src)) { |
| const arr_desc *ap; |
| ap = static_cast<const arr_desc*>(m_vars[i].ptr); |
| |
| dvp = (m_vars[i].type.src == c_dv_data_slice) ? |
| reinterpret_cast<ArrDesc*>(ap->base) : |
| *reinterpret_cast<ArrDesc**>(ap->base); |
| } |
| else { |
| dvp = (m_vars[i].type.src == c_dv_data) ? |
| static_cast<ArrDesc*>(m_vars[i].ptr) : |
| *static_cast<ArrDesc**>(m_vars[i].ptr); |
| } |
| |
| // if allocatable dope vector isn't allocated don't |
| // transfer its data |
| if (!__dv_is_allocated(dvp)) { |
| m_vars[i].direction.bits = c_parameter_nocopy; |
| m_vars[i].alloc_if = 0; |
| m_vars[i].free_if = 0; |
| } |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| const arr_desc *ap; |
| |
| if (VAR_TYPE_IS_DV_DATA_SLICE(m_vars[i].type.src)) { |
| ap = static_cast<const arr_desc*>(m_vars[i].ptr); |
| |
| // debug dump |
| __arr_desc_dump("", "IN/OUT", ap, 0); |
| } |
| if (!__dv_is_contiguous(dvp)) { |
| m_vars[i].flags.is_noncont_src = 1; |
| m_vars_extra[i].read_rng_src = |
| init_read_ranges_dv(dvp); |
| } |
| |
| // size and displacement |
| if (VAR_TYPE_IS_DV_DATA_SLICE(m_vars[i].type.src)) { |
| // offset and length are derived from the |
| // array descriptor |
| __arr_data_offset_and_length(ap, |
| m_vars[i].disp, |
| m_vars[i].size); |
| if (m_vars[i].direction.bits) { |
| if (!is_arr_desc_contiguous(ap)) { |
| if (m_vars[i].flags.is_noncont_src) { |
| LIBOFFLOAD_ERROR(c_slice_of_noncont_array); |
| return false; |
| } |
| m_vars[i].flags.is_noncont_src = 1; |
| m_vars_extra[i].read_rng_src = |
| init_read_ranges_arr_desc(ap); |
| } |
| } |
| } |
| else { |
| if (m_vars[i].flags.has_length) { |
| m_vars[i].size = |
| __dv_data_length(dvp, m_vars[i].count); |
| } |
| else { |
| m_vars[i].size = __dv_data_length(dvp); |
| } |
| m_vars[i].disp = 0; |
| } |
| |
| // check that length >= 0 |
| if (m_vars[i].alloc_if && |
| (m_vars[i].disp + m_vars[i].size < 0)) { |
| LIBOFFLOAD_ERROR(c_zero_or_neg_ptr_len); |
| exit(1); |
| } |
| |
| // base address |
| void *base = reinterpret_cast<void*>(dvp->Base); |
| PtrData *ptr_data; |
| |
| // allocate buffer if we have no INTO and don't need |
| // allocation for the ptr at target |
| if (src_is_for_mic) { |
| if (m_vars[i].alloc_if) { |
| // add new entry |
| if (!alloc_ptr_data( |
| ptr_data, |
| base, |
| (alloc_base != NULL) ? |
| alloc_disp : m_vars[i].disp, |
| (alloc_base != NULL) ? |
| alloc_size : m_vars[i].size, |
| alloc_disp, |
| (alloc_base != NULL) ? |
| 0 : m_vars[i].align)) { |
| return false; |
| } |
| |
| if (ptr_data->add_reference() == 0 && |
| ptr_data->mic_buf != 0) { |
| // add buffer to the list of buffers |
| // that are passed to dispatch call |
| m_compute_buffers.push_back( |
| ptr_data->mic_buf); |
| } |
| else { |
| // will send buffer address to device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (!ptr_data->is_static) { |
| // need to add reference for buffer |
| m_need_runfunction = true; |
| } |
| } |
| else { |
| bool error_if_not_found = true; |
| if (m_is_openmp) { |
| // For omp target update variable is ignored |
| // if it does not exist. |
| if (!m_vars[i].alloc_if && |
| !m_vars[i].free_if) { |
| error_if_not_found = false; |
| } |
| } |
| |
| // use existing association from pointer table |
| if (!find_ptr_data(ptr_data, |
| base, |
| m_vars[i].disp, |
| m_vars[i].size, |
| error_if_not_found)) { |
| return false; |
| } |
| |
| if (m_is_openmp) { |
| // make var nocopy if it does not exist |
| if (ptr_data == 0) { |
| m_vars[i].direction.bits = |
| c_parameter_nocopy; |
| } |
| } |
| |
| if (ptr_data != 0) { |
| // need to update base in dope vector on device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| } |
| |
| if (ptr_data != 0) { |
| if (m_is_openmp) { |
| // data is transferred only if |
| // alloc_if == 0 && free_if == 0 |
| // or reference count is 1 |
| if ((m_vars[i].alloc_if || |
| m_vars[i].free_if) && |
| ptr_data->get_reference() != 1) { |
| m_vars[i].direction.bits = |
| c_parameter_nocopy; |
| } |
| } |
| |
| if (ptr_data->alloc_disp != 0) { |
| m_vars[i].flags.alloc_disp = 1; |
| m_in_datalen += sizeof(alloc_disp); |
| } |
| |
| if (m_vars[i].flags.sink_addr) { |
                            // get the buffer's address on the sink
| if (!init_mic_address(ptr_data)) { |
| return false; |
| } |
| |
| m_in_datalen += sizeof(ptr_data->mic_addr); |
| } |
| |
| if (!ptr_data->is_static && m_vars[i].free_if) { |
| // need to decrement buffer reference on target |
| m_need_runfunction = true; |
| } |
| |
| // offset to base from the beginning of the buffer |
| // memory |
| m_vars[i].offset = |
| (char*) base - |
| (char*) ptr_data->cpu_addr.start(); |
| |
| // copy other pointer properties to var descriptor |
| m_vars[i].mic_offset = ptr_data->mic_offset; |
| m_vars[i].flags.is_static = ptr_data->is_static; |
| } |
| } |
| else { // !src_is_for_mic |
| if (!find_ptr_data(ptr_data, |
| base, |
| m_vars[i].disp, |
| m_vars[i].size, |
| false)) { |
| return false; |
| } |
| m_vars[i].offset = !ptr_data ? 0 : |
| (char*) base - |
| (char*) ptr_data->cpu_addr.start(); |
| } |
| |
| // save pointer data |
| m_vars_extra[i].src_data = ptr_data; |
| } |
| break; |
| |
| default: |
| LIBOFFLOAD_ERROR(c_unknown_var_type, m_vars[i].type.src); |
| LIBOFFLOAD_ABORT; |
| } |
| if (m_vars[i].type.src == c_data_ptr_array) { |
| continue; |
| } |
| |
| if (src_is_for_mic && m_vars[i].flags.is_stack_buf) { |
| m_vars[i].offset = static_cast<char*>(m_vars[i].ptr) - |
| m_device.m_persist_list.front().cpu_stack_addr; |
| } |
| // if source is used at CPU save its offset and disp |
| if (m_vars[i].into == NULL || m_vars[i].direction.in) { |
| m_vars_extra[i].cpu_offset = m_vars[i].offset; |
| m_vars_extra[i].cpu_disp = m_vars[i].disp; |
| } |
| |
        // If "into" is defined we need to do similar work for it
| if (!m_vars[i].into) { |
| continue; |
| } |
| |
        int64_t into_disp = 0, into_offset = 0;
| |
| switch (m_vars[i].type.dst) { |
| case c_data_ptr_array: |
| break; |
| case c_data: |
| case c_void_ptr: |
| case c_cean_var: { |
| int64_t size = m_vars[i].size; |
| |
| if (m_vars[i].type.dst == c_cean_var) { |
| // array descriptor |
| const arr_desc *ap = |
| static_cast<const arr_desc*>(m_vars[i].into); |
| |
| // debug dump |
| __arr_desc_dump(" ", "INTO", ap, 0); |
| |
| // offset and length are derived from the array descriptor |
| __arr_data_offset_and_length(ap, into_disp, size); |
| |
| if (!is_arr_desc_contiguous(ap)) { |
| m_vars[i].flags.is_noncont_dst = 1; |
| m_vars_extra[i].read_rng_dst = |
| init_read_ranges_arr_desc(ap); |
| if (!cean_ranges_match( |
| m_vars_extra[i].read_rng_src, |
| m_vars_extra[i].read_rng_dst)) { |
| LIBOFFLOAD_ERROR(c_ranges_dont_match); |
| exit(1); |
| } |
| } |
| m_vars[i].into = reinterpret_cast<void*>(ap->base); |
| } |
| |
| int64_t size_src = m_vars_extra[i].read_rng_src ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_src) : |
| m_vars[i].size; |
| int64_t size_dst = m_vars_extra[i].read_rng_dst ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_dst) : |
| size; |
            // the "into" size must not be less than the src size
| if (size_src > size_dst) { |
| LIBOFFLOAD_ERROR(c_different_src_and_dstn_sizes, |
| size_src, size_dst); |
| exit(1); |
| } |
| |
| if (m_vars[i].direction.bits) { |
| if (m_vars[i].flags.is_static_dstn) { |
| PtrData *ptr_data; |
| |
| // find data associated with variable |
| if (!find_ptr_data(ptr_data, m_vars[i].into, |
| into_disp, size, false)) { |
| return false; |
| } |
| if (ptr_data != 0) { |
| // offset to base from the beginning of the buffer |
| // memory |
| into_offset = |
| (char*) m_vars[i].into - |
| (char*) ptr_data->cpu_addr.start(); |
| } |
| else { |
| m_vars[i].flags.is_static_dstn = false; |
| } |
| m_vars_extra[i].dst_data = ptr_data; |
| } |
| } |
| |
| if (m_vars[i].direction.in && |
| !m_vars[i].flags.is_static_dstn) { |
| m_in_datalen += m_vars[i].size; |
| |
                // for a non-static target destination defined as a CEAN
                // expression we pass its size and disp to the target
| if (m_vars[i].type.dst == c_cean_var) { |
| m_in_datalen += 2 * sizeof(uint64_t); |
| } |
| m_need_runfunction = true; |
| } |
| break; |
| } |
| |
| case c_dv: |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| ArrDesc *dvp = static_cast<ArrDesc*>(m_vars[i].into); |
| |
| // debug dump |
| __dv_desc_dump("INTO", dvp); |
| |
| // send dope vector contents excluding base |
| m_in_datalen += m_vars[i].size - sizeof(uint64_t); |
| m_need_runfunction = true; |
| } |
| break; |
| |
| case c_string_ptr: |
| case c_data_ptr: |
| case c_cean_var_ptr: |
| case c_dv_ptr: { |
| int64_t size = m_vars[i].size; |
| |
| if (m_vars[i].type.dst == c_cean_var_ptr) { |
| // array descriptor |
| const arr_desc *ap = |
| static_cast<const arr_desc*>(m_vars[i].into); |
| |
| // debug dump |
| __arr_desc_dump(" ", "INTO", ap, 1); |
| |
| // offset and length are derived from the array descriptor |
| __arr_data_offset_and_length(ap, into_disp, size); |
| |
| if (!is_arr_desc_contiguous(ap)) { |
| m_vars[i].flags.is_noncont_src = 1; |
| m_vars_extra[i].read_rng_dst = |
| init_read_ranges_arr_desc(ap); |
| if (!cean_ranges_match( |
| m_vars_extra[i].read_rng_src, |
| m_vars_extra[i].read_rng_dst)) { |
| LIBOFFLOAD_ERROR(c_ranges_dont_match); |
| } |
| } |
| m_vars[i].into = reinterpret_cast<char**>(ap->base); |
| } |
| else if (m_vars[i].type.dst == c_dv_ptr) { |
| // need to send DV to the device unless it is 'nocopy' |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| ArrDesc *dvp = *static_cast<ArrDesc**>(m_vars[i].into); |
| |
| // debug dump |
| __dv_desc_dump("INTO", dvp); |
| |
| m_vars[i].direction.bits = c_parameter_in; |
| } |
| } |
| |
| int64_t size_src = m_vars_extra[i].read_rng_src ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_src) : |
| m_vars[i].size; |
| int64_t size_dst = m_vars_extra[i].read_rng_dst ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_dst) : |
| size; |
            // the "into" size must not be less than the src size
| if (size_src > size_dst) { |
| LIBOFFLOAD_ERROR(c_different_src_and_dstn_sizes, |
| size_src, size_dst); |
| exit(1); |
| } |
| |
| if (m_vars[i].direction.bits) { |
| PtrData *ptr_data; |
| |
| // base address |
| void *base = *static_cast<void**>(m_vars[i].into); |
| |
| if (m_vars[i].direction.in) { |
| // allocate buffer |
| if (m_vars[i].flags.is_stack_buf) { |
| // for stack persistent objects ptr data is created |
| // by var_desc with number 0. |
| // Its ptr_data is stored at m_stack_ptr_data |
| ptr_data = m_stack_ptr_data; |
| m_vars[i].flags.sink_addr = 1; |
| } |
| else if (m_vars[i].alloc_if) { |
| // add new entry |
| if (!alloc_ptr_data( |
| ptr_data, |
| base, |
| (alloc_base != NULL) ? |
| alloc_disp : into_disp, |
| (alloc_base != NULL) ? |
| alloc_size : size, |
| alloc_disp, |
| (alloc_base != NULL) ? |
| 0 : m_vars[i].align)) { |
| return false; |
| } |
| |
| if (ptr_data->add_reference() == 0 && |
| ptr_data->mic_buf != 0) { |
| // add buffer to the list of buffers that |
| // are passed to dispatch call |
| m_compute_buffers.push_back( |
| ptr_data->mic_buf); |
| } |
| else { |
| // will send buffer address to device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (!ptr_data->is_static) { |
| // need to add reference for buffer |
| m_need_runfunction = true; |
| } |
| } |
| else { |
| // use existing association from pointer table |
| if (!find_ptr_data(ptr_data, base, into_disp, size)) { |
| return false; |
| } |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (ptr_data->alloc_disp != 0) { |
| m_vars[i].flags.alloc_disp = 1; |
| m_in_datalen += sizeof(alloc_disp); |
| } |
| |
| if (m_vars[i].flags.sink_addr) { |
                        // get the buffer's address on the sink
| if (!init_mic_address(ptr_data)) { |
| return false; |
| } |
| |
| m_in_datalen += sizeof(ptr_data->mic_addr); |
| } |
| |
| if (!ptr_data->is_static && m_vars[i].free_if) { |
| // need to decrement buffer reference on target |
| m_need_runfunction = true; |
| } |
| |
| // copy other pointer properties to var descriptor |
| m_vars[i].mic_offset = ptr_data->mic_offset; |
| m_vars[i].flags.is_static_dstn = ptr_data->is_static; |
| } |
| else { |
| if (!find_ptr_data(ptr_data, |
| base, |
| into_disp, |
| m_vars[i].size, |
| false)) { |
| return false; |
| } |
| } |
                if (ptr_data) {
                    into_offset =
                        (char*) base -
                        (char*) ptr_data->cpu_addr.start();
                }
| // save pointer data |
| m_vars_extra[i].dst_data = ptr_data; |
| } |
| break; |
| } |
| |
| case c_func_ptr: |
| break; |
| |
| case c_dv_data: |
| case c_dv_ptr_data: |
| case c_dv_data_slice: |
| case c_dv_ptr_data_slice: |
| if (m_vars[i].direction.bits || |
| m_vars[i].alloc_if || |
| m_vars[i].free_if) { |
| const arr_desc *ap; |
| ArrDesc *dvp; |
| PtrData *ptr_data; |
| int64_t disp; |
| int64_t size; |
| |
| if (VAR_TYPE_IS_DV_DATA_SLICE(m_vars[i].type.dst)) { |
| ap = static_cast<const arr_desc*>(m_vars[i].into); |
| |
| // debug dump |
| __arr_desc_dump(" ", "INTO", ap, 0); |
| |
| dvp = (m_vars[i].type.dst == c_dv_data_slice) ? |
| reinterpret_cast<ArrDesc*>(ap->base) : |
| *reinterpret_cast<ArrDesc**>(ap->base); |
| } |
| else { |
| dvp = (m_vars[i].type.dst == c_dv_data) ? |
| static_cast<ArrDesc*>(m_vars[i].into) : |
| *static_cast<ArrDesc**>(m_vars[i].into); |
| } |
| if (!__dv_is_contiguous(dvp)) { |
| m_vars[i].flags.is_noncont_dst = 1; |
| m_vars_extra[i].read_rng_dst = |
| init_read_ranges_dv(dvp); |
| } |
| // size and displacement |
| if (VAR_TYPE_IS_DV_DATA_SLICE(m_vars[i].type.dst)) { |
| // offset and length are derived from the array |
| // descriptor |
| __arr_data_offset_and_length(ap, into_disp, size); |
| if (m_vars[i].direction.bits) { |
| if (!is_arr_desc_contiguous(ap)) { |
| if (m_vars[i].flags.is_noncont_dst) { |
| LIBOFFLOAD_ERROR(c_slice_of_noncont_array); |
| return false; |
| } |
| m_vars[i].flags.is_noncont_dst = 1; |
| m_vars_extra[i].read_rng_dst = |
| init_read_ranges_arr_desc(ap); |
| if (!cean_ranges_match( |
| m_vars_extra[i].read_rng_src, |
| m_vars_extra[i].read_rng_dst)) { |
| LIBOFFLOAD_ERROR(c_ranges_dont_match); |
| } |
| } |
| } |
| } |
| else { |
| if (m_vars[i].flags.has_length) { |
| size = __dv_data_length(dvp, m_vars[i].count); |
| } |
| else { |
| size = __dv_data_length(dvp); |
| } |
| disp = 0; |
| } |
| |
| int64_t size_src = |
| m_vars_extra[i].read_rng_src ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_src) : |
| m_vars[i].size; |
| int64_t size_dst = |
| m_vars_extra[i].read_rng_dst ? |
| cean_get_transf_size(m_vars_extra[i].read_rng_dst) : |
| size; |
                // the "into" size must not be less than the src size
| if (size_src > size_dst) { |
| LIBOFFLOAD_ERROR(c_different_src_and_dstn_sizes, |
| size_src, size_dst); |
| exit(1); |
| } |
| |
| // base address |
| void *base = reinterpret_cast<void*>(dvp->Base); |
| |
| // allocate buffer |
| if (m_vars[i].direction.in) { |
| if (m_vars[i].alloc_if) { |
| // add new entry |
| if (!alloc_ptr_data( |
| ptr_data, |
| base, |
| (alloc_base != NULL) ? |
| alloc_disp : into_disp, |
| (alloc_base != NULL) ? |
| alloc_size : size, |
| alloc_disp, |
| (alloc_base != NULL) ? |
| 0 : m_vars[i].align)) { |
| return false; |
| } |
| if (ptr_data->add_reference() == 0 && |
                            ptr_data->mic_buf != 0) {
| // add buffer to the list of buffers |
| // that are passed to dispatch call |
| m_compute_buffers.push_back( |
| ptr_data->mic_buf); |
| } |
| else { |
| // will send buffer address to device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (!ptr_data->is_static) { |
| // need to add reference for buffer |
| m_need_runfunction = true; |
| } |
| } |
| else { |
| // use existing association from pointer table |
| if (!find_ptr_data(ptr_data, base, into_disp, size)) { |
| return false; |
| } |
| |
| // need to update base in dope vector on device |
| m_vars[i].flags.sink_addr = 1; |
| } |
| |
| if (ptr_data->alloc_disp != 0) { |
| m_vars[i].flags.alloc_disp = 1; |
| m_in_datalen += sizeof(alloc_disp); |
| } |
| |
| if (m_vars[i].flags.sink_addr) { |
                        // get the buffer's address on the sink
| if (!init_mic_address(ptr_data)) { |
| return false; |
| } |
| m_in_datalen += sizeof(ptr_data->mic_addr); |
| } |
| |
| if (!ptr_data->is_static && m_vars[i].free_if) { |
| // need to decrement buffer reference on target |
| m_need_runfunction = true; |
| } |
| |
| // offset to base from the beginning of the buffer |
| // memory |
| into_offset = |
| (char*) base - (char*) ptr_data->cpu_addr.start(); |
| |
| // copy other pointer properties to var descriptor |
| m_vars[i].mic_offset = ptr_data->mic_offset; |
| m_vars[i].flags.is_static_dstn = ptr_data->is_static; |
| } |
| else { // src_is_for_mic |
| if (!find_ptr_data(ptr_data, |
| base, |
| into_disp, |
| size, |
| false)) { |
| return false; |
| } |
| into_offset = !ptr_data ? |
| 0 : |
| (char*) base - (char*) ptr_data->cpu_addr.start(); |
| } |
| |
| // save pointer data |
| m_vars_extra[i].dst_data = ptr_data; |
| } |
| break; |
| |
| default: |
| LIBOFFLOAD_ERROR(c_unknown_var_type, m_vars[i].type.src); |
| LIBOFFLOAD_ABORT; |
| } |
| // if into is used at CPU save its offset and disp |
| if (m_vars[i].direction.out) { |
| m_vars_extra[i].cpu_offset = into_offset; |
| m_vars_extra[i].cpu_disp = into_disp; |
| } |
| else { |
| if (m_vars[i].flags.is_stack_buf) { |
| into_offset = static_cast<char*>(m_vars[i].into) - |
| m_device.m_persist_list.front().cpu_stack_addr; |
| } |
| m_vars[i].offset = into_offset; |
| m_vars[i].disp = into_disp; |
| } |
| } |
| |
| return true; |
| } |
| |
| bool OffloadDescriptor::setup_misc_data(const char *name) |
| { |
| OffloadTimer timer(get_timer_data(), c_offload_host_setup_misc_data); |
| |
    // we can skip the run function call together with the wait if the
    // offloaded region is empty and there is no user-defined non-pointer
    // IN/OUT data
| if (m_need_runfunction) { |
| // variable descriptors are sent as input data |
| m_in_datalen += m_vars_total * sizeof(VarDesc); |
| |
| // timer data is sent as a part of the output data |
| m_out_datalen += OFFLOAD_TIMER_DATALEN(); |
| |
| // max from input data and output data length |
| uint64_t data_len = m_in_datalen > m_out_datalen ? m_in_datalen : |
| m_out_datalen; |
| |
| // Misc data has the following layout |
| // <Function Descriptor> |
| // <Function Name> |
| // <In/Out Data> (optional) |
| // |
| // We can transfer copyin/copyout data in misc/return data which can |
| // be passed to run function call if its size does not exceed |
| // COI_PIPELINE_MAX_IN_MISC_DATA_LEN. Otherwise we have to allocate |
| // buffer for it. |
| |
| m_func_desc_size = sizeof(FunctionDescriptor) + strlen(name) + 1; |
| m_func_desc_size = (m_func_desc_size + 7) & ~7; |
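        // e.g. for an entry name of 11 bytes (including the terminator) this
        // is sizeof(FunctionDescriptor) + 11 rounded up to the next multiple
        // of 8, so any copyin data placed after the descriptor in misc data
        // stays 8-byte aligned.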
| |
| int misc_data_offset = 0; |
| int misc_data_size = 0; |
| if (data_len > 0) { |
| if (m_func_desc_size + |
| m_in_datalen <= COI_PIPELINE_MAX_IN_MISC_DATA_LEN && |
| m_out_datalen <= COI_PIPELINE_MAX_IN_MISC_DATA_LEN) { |
| // use misc/return data for copyin/copyout |
| misc_data_offset = m_func_desc_size; |
| misc_data_size = data_len; |
| } |
| else { |
| OffloadTimer timer_buf(get_timer_data(), |
| c_offload_host_alloc_data_buffer); |
| |
| // send/receive data using buffer |
| COIRESULT res = COI::BufferCreate(data_len, |
| COI_BUFFER_NORMAL, |
| 0, 0, |
| 1, &m_device.get_process(), |
| &m_inout_buf); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_create, res); |
| } |
| |
| m_compute_buffers.push_back(m_inout_buf); |
| m_destroy_buffers.push_back(m_inout_buf); |
| } |
| } |
| |
| // initialize function descriptor |
| m_func_desc = (FunctionDescriptor*) malloc(m_func_desc_size + |
| misc_data_size); |
| m_func_desc->console_enabled = console_enabled; |
| m_func_desc->timer_enabled = |
| timer_enabled || (offload_report_level && offload_report_enabled); |
| m_func_desc->offload_report_level = offload_report_level; |
| m_func_desc->offload_number = GET_OFFLOAD_NUMBER(get_timer_data()); |
| m_func_desc->in_datalen = m_in_datalen; |
| m_func_desc->out_datalen = m_out_datalen; |
| m_func_desc->vars_num = m_vars_total; |
| m_func_desc->data_offset = misc_data_offset; |
| |
| // append entry name |
| strcpy(m_func_desc->data, name); |
| } |
| |
| return true; |
| } |
| |
| bool OffloadDescriptor::wait_dependencies( |
| const void **waits, |
| int num_waits |
| ) |
| { |
| OffloadTimer timer(get_timer_data(), c_offload_host_wait_deps); |
| bool ret = true; |
| |
| for (int i = 0; i < num_waits; i++) { |
| |
| OffloadDescriptor *task = m_device.find_signal(waits[i], true); |
| if (task == 0) { |
| LIBOFFLOAD_ERROR(c_offload1, m_device.get_logical_index(), |
| waits[i]); |
| LIBOFFLOAD_ABORT; |
| } |
| |
| if (!task->offload_finish()) { |
| ret = false; |
| } |
| |
| task->cleanup(); |
| delete task; |
| } |
| |
| return ret; |
| } |
| |
| bool OffloadDescriptor::offload( |
| const char *name, |
| bool is_empty, |
| VarDesc *vars, |
| VarDesc2 *vars2, |
| int vars_total, |
| const void **waits, |
| int num_waits, |
| const void **signal, |
| int entry_id, |
| const void *stack_addr |
| ) |
| { |
| if (signal == 0) { |
| OFFLOAD_DEBUG_TRACE_1(1, |
| GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_init_func, |
| "Offload function %s, is_empty=%d, #varDescs=%d, " |
| "#waits=%d, signal=none\n", |
| name, is_empty, vars_total, num_waits); |
| OFFLOAD_REPORT(3, GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_sent_pointer_data, |
| "#Wait : %d \n", num_waits); |
| OFFLOAD_REPORT(3, GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_signal, |
| "none %d\n", 0); |
| } |
| else { |
| OFFLOAD_DEBUG_TRACE_1(1, |
| GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_init_func, |
| "Offload function %s, is_empty=%d, #varDescs=%d, " |
| "#waits=%d, signal=%p\n", |
| name, is_empty, vars_total, num_waits, |
| *signal); |
| |
| OFFLOAD_REPORT(3, GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_signal, |
| "%d\n", signal); |
| } |
| OFFLOAD_REPORT(3, GET_OFFLOAD_NUMBER(get_timer_data()), |
| c_offload_wait, |
| "#Wait : %d %p\n", num_waits, waits); |
| |
| if (m_status != 0) { |
| m_status->result = OFFLOAD_SUCCESS; |
| m_status->device_number = m_device.get_logical_index(); |
| } |
| |
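| // An empty offload only transfers data; no function is run on the target. |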
| m_need_runfunction = !is_empty; |
| |
| // wait for dependencies to finish |
| if (!wait_dependencies(waits, num_waits)) { |
| cleanup(); |
| return false; |
| } |
| |
| // setup buffers |
| if (!setup_descriptors(vars, vars2, vars_total, entry_id, stack_addr)) { |
| cleanup(); |
| return false; |
| } |
| |
| // initiate send for pointer data; we want to start it as early as possible |
| if (!send_pointer_data(signal != 0)) { |
| cleanup(); |
| return false; |
| } |
| |
| // setup misc data for run function |
| if (!setup_misc_data(name)) { |
| cleanup(); |
| return false; |
| } |
| |
| // gather copyin data into buffer |
| if (!gather_copyin_data()) { |
| cleanup(); |
| return false; |
| } |
| |
| // Start the computation |
| if (!compute()) { |
| cleanup(); |
| return false; |
| } |
| |
| // initiate receive for pointers |
| if (!receive_pointer_data(signal != 0)) { |
| cleanup(); |
| return false; |
| } |
| |
| // If there is a signal, save the descriptor for later use. |
| if (signal != 0) { |
| m_device.add_signal(*signal, this); |
| return true; |
| } |
| |
| // wait for the offload to finish. |
| if (!offload_finish()) { |
| cleanup(); |
| return false; |
| } |
| |
| cleanup(); |
| return true; |
| } |
| |
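| // Completes an offload: waits for the compute and buffer-read events to be |
| // signaled, scatters the received copyout data and destroys the temporary |
| // buffers created for this offload. |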
| bool OffloadDescriptor::offload_finish() |
| { |
| COIRESULT res; |
| |
| // wait for compute dependencies to become signaled |
| if (m_in_deps_total > 0) { |
| OffloadTimer timer(get_timer_data(), c_offload_host_wait_compute); |
| |
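| // With active wait enabled, spin on EventWait with a zero timeout to keep |
| // the CPU busy; otherwise issue a single blocking wait (-1 timeout). |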
| if (__offload_active_wait) { |
| // keep CPU busy |
| do { |
| res = COI::EventWait(m_in_deps_total, m_in_deps, 0, 1, 0, 0); |
| } |
| while (res == COI_TIME_OUT_REACHED); |
| } |
| else { |
| res = COI::EventWait(m_in_deps_total, m_in_deps, -1, 1, 0, 0); |
| } |
| |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_event_wait, res); |
| } |
| } |
| |
| // scatter copyout data received from target |
| if (!scatter_copyout_data()) { |
| return false; |
| } |
| // wait for receive dependencies to become signaled |
| if (m_out_deps_total > 0) { |
| OffloadTimer timer(get_timer_data(), c_offload_host_wait_buffers_reads); |
| |
| if (__offload_active_wait) { |
| // keep CPU busy |
| do { |
| res = COI::EventWait(m_out_deps_total, m_out_deps, 0, 1, 0, 0); |
| } |
| while (res == COI_TIME_OUT_REACHED); |
| } |
| else { |
| res = COI::EventWait(m_out_deps_total, m_out_deps, -1, 1, 0, 0); |
| } |
| |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_event_wait, res); |
| } |
| } |
| |
| // destroy buffers |
| { |
| OffloadTimer timer(get_timer_data(), c_offload_host_destroy_buffers); |
| |
| for (BufferList::const_iterator it = m_destroy_buffers.begin(); |
| it != m_destroy_buffers.end(); it++) { |
| res = COI::BufferDestroy(*it); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_destroy, res); |
| } |
| } |
| } |
| |
| return true; |
| } |
| |
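| // Releases the device reservation in ORSL, stops the per-offload timer and |
| // emits the offload report for this offload. |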
| void OffloadDescriptor::cleanup() |
| { |
| // release device in orsl |
| ORSL::release(m_device.get_logical_index()); |
| |
| OFFLOAD_TIMER_STOP(get_timer_data(), c_offload_host_total_offload); |
| |
| // report stuff |
| Offload_Report_Epilog(get_timer_data()); |
| } |
| |
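| // Returns true if all compute and receive events of this offload have |
| // already been signaled (checked with a non-blocking, zero-timeout wait). |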
| bool OffloadDescriptor::is_signaled() |
| { |
| bool signaled = true; |
| COIRESULT res; |
| |
| // check compute and receive dependencies |
| if (m_in_deps_total > 0) { |
| res = COI::EventWait(m_in_deps_total, m_in_deps, 0, 1, 0, 0); |
| signaled = signaled && (res == COI_SUCCESS); |
| } |
| if (m_out_deps_total > 0) { |
| res = COI::EventWait(m_out_deps_total, m_out_deps, 0, 1, 0, 0); |
| signaled = signaled && (res == COI_SUCCESS); |
| } |
| |
| return signaled; |
| } |
| |
| // Send pointer data when the source, the destination, or both are |
| // noncontiguous. The destination is guaranteed to be large enough to hold |
| // the transferred data. |
| bool OffloadDescriptor::send_noncontiguous_pointer_data( |
| int i, |
| PtrData* src_data, |
| PtrData* dst_data, |
| COIEVENT *event |
| ) |
| { |
| int64_t offset_src, offset_dst; |
| int64_t length_src, length_dst; |
| int64_t length_src_cur, length_dst_cur; |
| int64_t send_size, data_sent = 0; |
| COIRESULT res; |
| bool dst_is_empty = true; |
| bool src_is_empty = true; |
| |
| // Set length_src and length_dst |
| length_src = (m_vars_extra[i].read_rng_src) ? |
| m_vars_extra[i].read_rng_src->range_size : m_vars[i].size; |
| length_dst = !m_vars[i].into ? length_src : |
| (m_vars_extra[i].read_rng_dst) ? |
| m_vars_extra[i].read_rng_dst->range_size : m_vars[i].size; |
| send_size = (length_src < length_dst) ? length_src : length_dst; |
| |
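| // src_is_empty / dst_is_empty mean that the current contiguous chunk on |
| // that side has been fully consumed and the next range (if any) must be |
| // fetched. |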
| // Walk the contiguous ranges one after another, compute the corresponding |
| // destination offset and send the data. Each iteration transfers at most |
| // send_size bytes - the smaller of the two contiguous extents. |
| do { |
| if (src_is_empty) { |
| if (m_vars_extra[i].read_rng_src) { |
| if (!get_next_range(m_vars_extra[i].read_rng_src, |
| &offset_src)) { |
| // source ranges are exhausted - nothing more to send |
| break; |
| } |
| } |
| else if (data_sent == 0) { |
| offset_src = m_vars_extra[i].cpu_disp; |
| } |
| else { |
| break; |
| } |
| length_src_cur = length_src; |
| } |
| else { |
| // if the source is contiguous or its contiguous range is greater |
| // than the destination one |
| offset_src += send_size; |
| } |
| length_src_cur -= send_size; |
| src_is_empty = length_src_cur == 0; |
| |
| if (dst_is_empty) { |
| if (m_vars[i].into) { |
| if (m_vars_extra[i].read_rng_dst) { |
| if (!get_next_range(m_vars_extra[i].read_rng_dst, |
| &offset_dst)) { |
| // destination ranges are exhausted |
| LIBOFFLOAD_ERROR(c_destination_is_over); |
| return false; |
| } |
| } |
| // into is contiguous. |
| else { |
| offset_dst = m_vars[i].disp; |
| } |
| length_dst_cur = length_dst; |
| } |
| // same as source |
| else { |
| offset_dst = offset_src; |
| length_dst_cur = length_src; |
| } |
| } |
| else { |
| // if the destination is contiguous or its contiguous range is greater |
| // than the source one |
| offset_dst += send_size; |
| } |
| length_dst_cur -= send_size; |
| dst_is_empty = length_dst_cur == 0; |
| |
| if (src_data != 0 && src_data->cpu_buf != 0) { |
| res = COI::BufferCopy( |
| dst_data->mic_buf, |
| src_data->cpu_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + offset_dst, |
| m_vars_extra[i].cpu_offset + offset_src, |
| send_size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_copy, res); |
| } |
| } |
| else { |
| char *base = offload_get_src_base(m_vars[i].ptr, |
| m_vars[i].type.src); |
| |
| res = COI::BufferWrite( |
| dst_data->mic_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + offset_dst, |
| base + offset_src, |
| send_size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_write, res); |
| } |
| } |
| data_sent += length_src; |
| } |
| while (true); |
| return true; |
| } |
| |
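| // Initiates the transfers for all IN variables of this offload. Data goes |
| // through a COI buffer copy when the host side is backed by a COI buffer, |
| // and through a direct BufferWrite otherwise. On a COI failure the status |
| // object, if any, is updated and false is returned; otherwise the error is |
| // reported via report_coi_error. |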
| bool OffloadDescriptor::send_pointer_data(bool is_async) |
| { |
| OffloadTimer timer(get_timer_data(), c_offload_host_send_pointers); |
| |
| uint64_t ptr_sent = 0; |
| COIRESULT res; |
| |
| // Initiate send for pointer data |
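| // A completion event is recorded in m_in_deps only when the offload is |
| // asynchronous or the transfer size reaches __offload_use_async_buffer_write; |
| // otherwise no completion event is requested for the COI call. |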
| for (int i = 0; i < m_vars_total; i++) { |
| switch (m_vars[i].type.dst) { |
| case c_data_ptr_array: |
| break; |
| case c_data: |
| case c_void_ptr: |
| case c_cean_var: |
| if (m_vars[i].direction.in && |
| m_vars[i].flags.is_static_dstn) { |
| COIEVENT *event = |
| (is_async || |
| m_vars[i].size >= __offload_use_async_buffer_write) ? |
| &m_in_deps[m_in_deps_total++] : 0; |
| PtrData* dst_data = m_vars[i].into ? |
| m_vars_extra[i].dst_data : |
| m_vars_extra[i].src_data; |
| PtrData* src_data = |
| (VAR_TYPE_IS_PTR(m_vars[i].type.src) || |
| (VAR_TYPE_IS_SCALAR(m_vars[i].type.src) && |
| m_vars[i].flags.is_static)) ? |
| m_vars_extra[i].src_data : 0; |
| |
| if (m_vars[i].flags.is_noncont_src || |
| m_vars[i].flags.is_noncont_dst) { |
| if (!send_noncontiguous_pointer_data( |
| i, src_data, dst_data, event)) { |
| return false; |
| } |
| } |
| else if (src_data != 0 && src_data->cpu_buf != 0) { |
| res = COI::BufferCopy( |
| dst_data->mic_buf, |
| src_data->cpu_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + m_vars[i].disp, |
| m_vars_extra[i].cpu_offset + |
| m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_copy, res); |
| } |
| } |
| else { |
| char *base = offload_get_src_base(m_vars[i].ptr, |
| m_vars[i].type.src); |
| res = COI::BufferWrite( |
| dst_data->mic_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + m_vars[i].disp, |
| base + m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_write, res); |
| } |
| } |
| ptr_sent += m_vars[i].size; |
| } |
| break; |
| |
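| // Pointer-based variables: same copy/write logic as the static data case |
| // above, but the transfer is done for any IN variable with a non-zero size. |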
| case c_string_ptr: |
| case c_data_ptr: |
| case c_cean_var_ptr: |
| case c_dv_ptr: |
| if (m_vars[i].direction.in && m_vars[i].size > 0) { |
| COIEVENT *event = |
| (is_async || |
| m_vars[i].size >= __offload_use_async_buffer_write) ? |
| &m_in_deps[m_in_deps_total++] : 0; |
| PtrData* dst_data = m_vars[i].into ? |
| m_vars_extra[i].dst_data : |
| m_vars_extra[i].src_data; |
| PtrData* src_data = |
| (VAR_TYPE_IS_PTR(m_vars[i].type.src) || |
| (VAR_TYPE_IS_SCALAR(m_vars[i].type.src) && |
| m_vars[i].flags.is_static)) ? |
| m_vars_extra[i].src_data : 0; |
| |
| if (m_vars[i].flags.is_noncont_src || |
| m_vars[i].flags.is_noncont_dst) { |
| if (!send_noncontiguous_pointer_data( |
| i, src_data, dst_data, event)) { |
| return false; |
| } |
| } |
| else if (src_data != 0 && src_data->cpu_buf != 0) { |
| res = COI::BufferCopy( |
| dst_data->mic_buf, |
| src_data->cpu_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + m_vars[i].disp, |
| m_vars_extra[i].cpu_offset + |
| m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_copy, res); |
| } |
| } |
| else { |
| char *base = offload_get_src_base(m_vars[i].ptr, |
| m_vars[i].type.src); |
| res = COI::BufferWrite( |
| dst_data->mic_buf, |
| m_vars[i].mic_offset - dst_data->alloc_disp + |
| m_vars[i].offset + m_vars[i].disp, |
| base + m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_write, res); |
| } |
| } |
| |
| ptr_sent += m_vars[i].size; |
| } |
| break; |
| |
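| // Dope vector (dv) data: the destination is the buffer that backs the dope |
| // vector contents (ptr_data); otherwise the transfer logic mirrors the |
| // pointer cases above. |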
| case c_dv_data: |
| case c_dv_ptr_data: |
| if (m_vars[i].direction.in && |
| m_vars[i].size > 0) { |
| PtrData *ptr_data = m_vars[i].into ? |
| m_vars_extra[i].dst_data : |
| m_vars_extra[i].src_data; |
| PtrData* src_data = m_vars_extra[i].src_data; |
| |
| COIEVENT *event = |
| (is_async || |
| m_vars[i].size >= __offload_use_async_buffer_write) ? |
| &m_in_deps[m_in_deps_total++] : 0; |
| |
| if (m_vars[i].flags.is_noncont_src || |
| m_vars[i].flags.is_noncont_dst) { |
| if (!send_noncontiguous_pointer_data( |
| i, src_data, ptr_data, event)) { |
| return false; |
| } |
| } |
| else if (src_data && src_data->cpu_buf != 0) { |
| res = COI::BufferCopy( |
| ptr_data->mic_buf, |
| src_data->cpu_buf, |
| m_vars[i].offset + ptr_data->mic_offset - |
| ptr_data->alloc_disp + |
| m_vars[i].disp, |
| m_vars_extra[i].cpu_offset + |
| m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_copy, res); |
| } |
| } |
| else { |
| char *base = offload_get_src_base(m_vars[i].ptr, |
| m_vars[i].type.src); |
| res = COI::BufferWrite( |
| ptr_data->mic_buf, |
| ptr_data->mic_offset - ptr_data->alloc_disp + |
| m_vars[i].offset + m_vars[i].disp, |
| base + m_vars_extra[i].cpu_disp, |
| m_vars[i].size, |
| COI_COPY_UNSPECIFIED, |
| 0, 0, |
| event); |
| if (res != COI_SUCCESS) { |
| if (m_status != 0) { |
| m_status->result = translate_coi_error(res); |
| return false; |
| } |
| report_coi_error(c_buf_write, res); |
| } |
| } |
| ptr_sent += m_vars[i].size; |
| } |
| break; |
|