Merge tag 'misc-habanalabs-next-2022-11-23' of https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux into char-misc-next

Oded writes:

This tag contains habanalabs driver changes for v6.2:

- New feature of graceful hard-reset. Instead of immediately killing the
  user-process when a command submission times out, we wait a bit, give the
  user-process a notification and let it try to close things gracefully,
  with the ability to retrieve debug information (a simplified sketch of
  this flow follows this list).

- Enhance the EventFD mechanism. Add new events, such as access to an
  illegal address (RAZWI), page fault and device unavailable. In addition,
  change the event handling to use a single-threaded workqueue.

- Allow the control device to work during reset of the ASIC, so that
  monitoring applications can continue to collect data.

- Add handling for Gaudi2 with PCI revision 2.

- Reduce severity of prints due to power/thermal events.

- Change how we use the h/w to perform memory scrubbing in Gaudi2.

- Multiple bug fixes, refactors and renames.
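
As a rough illustration of the graceful hard-reset flow in the first item above,
here is a minimal sketch; it is not the driver's actual code, and struct my_dev
and the my_dev_* helpers are hypothetical stand-ins. Instead of resetting
immediately on an error, the driver notifies the user process and arms a delayed
"device release watchdog" that performs the reset only if the device is not
released in time:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical, heavily simplified device structure for the sketch */
struct my_dev {
	struct delayed_work release_watchdog;
	spinlock_t reset_lock;
	bool watchdog_active;
	bool in_reset;
	unsigned int watchdog_timeout_sec;
};

static void my_dev_hard_reset(struct my_dev *dev)
{
	pr_info("performing hard reset\n");	/* stand-in for the real reset path */
}

static void my_dev_notify_user(struct my_dev *dev)
{
	pr_info("notifying user process\n");	/* stand-in for the eventfd notifier */
}

static void my_dev_release_watchdog_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, release_watchdog.work);

	/* The user process did not release the device in time, reset it now */
	my_dev_hard_reset(dev);
}

static void my_dev_watchdog_init(struct my_dev *dev, unsigned int timeout_sec)
{
	spin_lock_init(&dev->reset_lock);
	dev->watchdog_timeout_sec = timeout_sec;
	INIT_DELAYED_WORK(&dev->release_watchdog, my_dev_release_watchdog_fn);
}

static void my_dev_cond_reset(struct my_dev *dev)
{
	spin_lock(&dev->reset_lock);
	if (dev->in_reset || dev->watchdog_active) {
		/* A reset is already running or the watchdog is already armed */
		spin_unlock(&dev->reset_lock);
		return;
	}
	dev->watchdog_active = true;
	schedule_delayed_work(&dev->release_watchdog,
			      msecs_to_jiffies(dev->watchdog_timeout_sec * 1000));
	spin_unlock(&dev->reset_lock);

	/* Let the user process collect debug info and close things gracefully */
	my_dev_notify_user(dev);
}

In the actual patches below, the scheduled watchdog work is cancelled with
cancel_delayed_work_sync() when another reset starts or when the device is torn
down, as can be seen in the hl_device_reset() and hl_device_fini() hunks.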

* tag 'misc-habanalabs-next-2022-11-23' of https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux: (63 commits)
  habanalabs: fix VA range calculation
  habanalabs: fail driver load if EEPROM errors detected
  habanalabs: make print of engines idle mask more readable
  habanalabs: clear non-released encapsulated signals
  habanalabs: don't put context in hl_encaps_handle_do_release_sob()
  habanalabs: print context refcount value if hard reset fails
  habanalabs: add RMWREG32_SHIFTED to set a val within a mask
  habanalabs: fix rc when new CPUCP opcodes are not supported
  habanalabs/gaudi2: added memset for the cq_size register
  habanalabs: added return value check for hl_fw_dynamic_send_clear_cmd()
  habanalabs: increase the size of busy engines mask
  habanalabs/gaudi2: change memory scrub mechanism
  habanalabs: extend process wait timeout in device fini
  habanalabs: check schedule_hard_reset correctly
  habanalabs: reset device if still in use when released
  habanalabs/gaudi2: return to reset upon SM SEI BRESP error
  habanalabs/gaudi2: don't enable entries in the MSIX_GW table
  habanalabs/gaudi2: remove redundant firmware version check
  habanalabs/gaudi: fix print for firmware-alive event
  habanalabs: fix print for out-of-sync and pkt-failure events
  ...
Greg Kroah-Hartman 2022-11-29 13:19:29 +01:00
commit ae27e8869f
21 changed files with 1271 additions and 533 deletions

@ -91,6 +91,13 @@ Description: Enables the root user to set the device to specific state.
                Valid values are "disable", "enable", "suspend", "resume".
                User can read this property to see the valid values

What:           /sys/kernel/debug/habanalabs/hl<n>/device_release_watchdog_timeout
Date:           Oct 2022
KernelVersion:  6.2
Contact:        ttayar@habana.ai
Description:    The watchdog timeout value in seconds for a device release upon
                certain error cases, after which the device is reset.

What:           /sys/kernel/debug/habanalabs/hl<n>/dma_size
Date:           Apr 2021
KernelVersion:  5.13

@ -742,13 +742,11 @@ static void cs_do_release(struct kref *ref)
*/ */
if (hl_cs_cmpl->encaps_signals) if (hl_cs_cmpl->encaps_signals)
kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount, kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
hl_encaps_handle_do_release); hl_encaps_release_handle_and_put_ctx);
} }
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
&& cs->encaps_signals) kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
kref_put(&cs->encaps_sig_hdl->refcount,
hl_encaps_handle_do_release);
out: out:
/* Must be called before hl_ctx_put because inside we use ctx to get /* Must be called before hl_ctx_put because inside we use ctx to get
@ -798,7 +796,7 @@ out:
static void cs_timedout(struct work_struct *work) static void cs_timedout(struct work_struct *work)
{ {
struct hl_device *hdev; struct hl_device *hdev;
u64 event_mask; u64 event_mask = 0x0;
int rc; int rc;
struct hl_cs *cs = container_of(work, struct hl_cs, struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work); work_tdr.work);
@ -830,11 +828,7 @@ static void cs_timedout(struct work_struct *work)
if (rc) { if (rc) {
hdev->captured_err_info.cs_timeout.timestamp = ktime_get(); hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
hdev->captured_err_info.cs_timeout.seq = cs->sequence; hdev->captured_err_info.cs_timeout.seq = cs->sequence;
event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
hl_notifier_event_send_all(hdev, event_mask);
} }
switch (cs->type) { switch (cs->type) {
@ -869,8 +863,12 @@ static void cs_timedout(struct work_struct *work)
cs_put(cs); cs_put(cs);
if (device_reset) if (device_reset) {
hl_device_reset(hdev, HL_DRV_RESET_TDR); event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
} else if (event_mask) {
hl_notifier_event_send_all(hdev, event_mask);
}
} }
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
@ -1011,6 +1009,34 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
hl_complete_job(hdev, job); hl_complete_job(hdev, job);
} }
/*
* release_reserved_encaps_signals() - release reserved encapsulated signals.
* @hdev: pointer to habanalabs device structure
*
* Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
* encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
* For these signals need also to put the refcount of the H/W SOB which was taken at the
* reservation.
*/
static void release_reserved_encaps_signals(struct hl_device *hdev)
{
struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
struct hl_cs_encaps_sig_handle *handle;
struct hl_encaps_signals_mgr *mgr;
u32 id;
if (!ctx)
return;
mgr = &ctx->sig_mgr;
idr_for_each_entry(&mgr->handles, handle, id)
if (handle->cs_seq == ULLONG_MAX)
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
hl_ctx_put(ctx);
}
void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush) void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
{ {
int i; int i;
@ -1039,6 +1065,8 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
} }
force_complete_multi_cs(hdev); force_complete_multi_cs(hdev);
release_reserved_encaps_signals(hdev);
} }
static void static void
@ -2001,6 +2029,8 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
*/ */
handle->pre_sob_val = prop->next_sob_val - handle->count; handle->pre_sob_val = prop->next_sob_val - handle->count;
handle->cs_seq = ULLONG_MAX;
*signals_count = prop->next_sob_val; *signals_count = prop->next_sob_val;
hdev->asic_funcs->hw_queues_unlock(hdev); hdev->asic_funcs->hw_queues_unlock(hdev);
@ -2350,10 +2380,8 @@ put_cs:
/* We finished with the CS in this function, so put the ref */ /* We finished with the CS in this function, so put the ref */
cs_put(cs); cs_put(cs);
free_cs_chunk_array: free_cs_chunk_array:
if (!wait_cs_submitted && cs_encaps_signals && handle_found && if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
is_wait_cs) kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
kref_put(&encaps_sig_hdl->refcount,
hl_encaps_handle_do_release);
kfree(cs_chunk_array); kfree(cs_chunk_array);
out: out:
return rc; return rc;

@ -9,48 +9,55 @@
#include <linux/slab.h> #include <linux/slab.h>
void hl_encaps_handle_do_release(struct kref *ref) static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
bool put_ctx)
{ {
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr; struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
spin_lock(&mgr->lock); if (put_hw_sob)
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
hl_ctx_put(handle->ctx);
kfree(handle);
}
static void hl_encaps_handle_do_release_sob(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
/* if we're here, then there was a signals reservation but cs with
* encaps signals wasn't submitted, so need to put refcount
* to hw_sob taken at the reservation.
*/
hw_sob_put(handle->hw_sob); hw_sob_put(handle->hw_sob);
spin_lock(&mgr->lock); spin_lock(&mgr->lock);
idr_remove(&mgr->handles, handle->id); idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
if (put_ctx)
hl_ctx_put(handle->ctx); hl_ctx_put(handle->ctx);
kfree(handle); kfree(handle);
} }
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, false, true);
}
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, true, false);
}
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
encaps_handle_do_release(handle, true, true);
}
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr) static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{ {
spin_lock_init(&mgr->lock); spin_lock_init(&mgr->lock);
idr_init(&mgr->handles); idr_init(&mgr->handles);
} }
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
struct hl_encaps_signals_mgr *mgr)
{ {
struct hl_cs_encaps_sig_handle *handle; struct hl_cs_encaps_sig_handle *handle;
struct idr *idp; struct idr *idp;
@ -58,11 +65,14 @@ static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
idp = &mgr->handles; idp = &mgr->handles;
/* The IDR is expected to be empty at this stage, because any left signal should have been
* released as part of CS roll-back.
*/
if (!idr_is_empty(idp)) { if (!idr_is_empty(idp)) {
dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n"); dev_warn(hdev->dev,
"device released while some encaps signals handles are still allocated\n");
idr_for_each_entry(idp, handle, id) idr_for_each_entry(idp, handle, id)
kref_put(&handle->refcount, kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
hl_encaps_handle_do_release_sob);
} }
idr_destroy(&mgr->handles); idr_destroy(&mgr->handles);
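
The refactor above folds the separate kref release callbacks for
encapsulated-signal handles into one internal helper that takes flags for what
to put. A minimal, generic sketch of that pattern, with hypothetical
my_mgr/my_handle types standing in for the driver's structures:

#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/printk.h>
#include <linux/types.h>

struct my_mgr {
	spinlock_t lock;
	struct idr handles;
};

struct my_handle {
	struct kref refcount;
	struct my_mgr *mgr;
	int id;
};

/* One internal helper does the real work; the flag selects the extra put */
static void my_handle_do_release(struct my_handle *h, bool put_hw_resource)
{
	if (put_hw_resource)
		pr_info("putting h/w resource of handle %d\n", h->id);

	spin_lock(&h->mgr->lock);
	idr_remove(&h->mgr->handles, h->id);
	spin_unlock(&h->mgr->lock);

	kfree(h);
}

/* Thin kref release callbacks just pick the right combination of flags */
static void my_handle_release(struct kref *ref)
{
	my_handle_do_release(container_of(ref, struct my_handle, refcount), false);
}

static void my_handle_release_and_put_hw(struct kref *ref)
{
	my_handle_do_release(container_of(ref, struct my_handle, refcount), true);
}

Callers then pick the callback matching their context, e.g.
kref_put(&h->refcount, my_handle_release_and_put_hw);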

@ -1769,6 +1769,11 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry, dev_entry,
&hl_timeout_locked_fops); &hl_timeout_locked_fops);
debugfs_create_u32("device_release_watchdog_timeout",
0644,
dev_entry->root,
&hdev->device_release_watchdog_timeout_sec);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name, debugfs_create_file(hl_debugfs_list[i].name,
0444, 0444,
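
The debugfs hunk above exposes the new watchdog timeout via debugfs_create_u32().
As a self-contained illustration of that API, assuming a made-up module and
directory name, a minimal module could look like this:

#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_root;
static u32 demo_timeout_sec = 5;	/* arbitrary default for the demo */

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("demo_watchdog", NULL);

	/* 0644: root can both read the value and update it at runtime */
	debugfs_create_u32("timeout_sec", 0644, demo_root, &demo_timeout_sec);

	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Reading and updating the value from a root shell would then be, for example:

# cat /sys/kernel/debug/demo_watchdog/timeout_sec
# echo 30 > /sys/kernel/debug/demo_watchdog/timeout_sec

For the driver itself, the node is the
/sys/kernel/debug/habanalabs/hl<n>/device_release_watchdog_timeout entry
documented above.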

@ -12,11 +12,14 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/hwmon.h> #include <linux/hwmon.h>
#include <linux/vmalloc.h>
#include <trace/events/habanalabs.h> #include <trace/events/habanalabs.h>
#define HL_RESET_DELAY_USEC 10000 /* 10ms */ #define HL_RESET_DELAY_USEC 10000 /* 10ms */
#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5
enum dma_alloc_type { enum dma_alloc_type {
DMA_ALLOC_COHERENT, DMA_ALLOC_COHERENT,
DMA_ALLOC_CPU_ACCESSIBLE, DMA_ALLOC_CPU_ACCESSIBLE,
@ -31,6 +34,7 @@ enum dma_alloc_type {
* @hdev: pointer to habanalabs device structure. * @hdev: pointer to habanalabs device structure.
* @addr: the address the caller wants to access. * @addr: the address the caller wants to access.
* @region: the PCI region. * @region: the PCI region.
* @new_bar_region_base: the new BAR region base address.
* *
* @return: the old BAR base address on success, U64_MAX for failure. * @return: the old BAR base address on success, U64_MAX for failure.
* The caller should set it back to the old address after use. * The caller should set it back to the old address after use.
@ -40,7 +44,8 @@ enum dma_alloc_type {
* This function can be called also if the bar doesn't need to be set, * This function can be called also if the bar doesn't need to be set,
* in that case it just won't change the base. * in that case it just won't change the base.
*/ */
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region) static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
u64 *new_bar_region_base)
{ {
struct asic_fixed_properties *prop = &hdev->asic_prop; struct asic_fixed_properties *prop = &hdev->asic_prop;
u64 bar_base_addr, old_base; u64 bar_base_addr, old_base;
@ -54,27 +59,28 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi
old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr); old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
/* in case of success we need to update the new BAR base */ /* in case of success we need to update the new BAR base */
if (old_base != U64_MAX) if ((old_base != U64_MAX) && new_bar_region_base)
region->region_base = bar_base_addr; *new_bar_region_base = bar_base_addr;
return old_base; return old_base;
} }
static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val, int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type) enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
{ {
struct pci_mem_region *region = &hdev->pci_mem_region[region_type]; struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
u64 old_base = 0, rc, bar_region_base = region->region_base;
void __iomem *acc_addr; void __iomem *acc_addr;
u64 old_base = 0, rc;
if (region_type == PCI_REGION_DRAM) { if (set_dram_bar) {
old_base = hl_set_dram_bar(hdev, addr, region); old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
if (old_base == U64_MAX) if (old_base == U64_MAX)
return -EIO; return -EIO;
} }
acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base + acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
region->offset_in_bar; (addr - bar_region_base);
switch (acc_type) { switch (acc_type) {
case DEBUGFS_READ8: case DEBUGFS_READ8:
*val = readb(acc_addr); *val = readb(acc_addr);
@ -96,8 +102,8 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
break; break;
} }
if (region_type == PCI_REGION_DRAM) { if (set_dram_bar) {
rc = hl_set_dram_bar(hdev, old_base, region); rc = hl_set_dram_bar(hdev, old_base, region, NULL);
if (rc == U64_MAX) if (rc == U64_MAX)
return -EIO; return -EIO;
} }
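
The two hunks above rework the SRAM/DRAM access helper so that the DRAM BAR
window is only moved when requested, and the access offset is computed against
the (possibly just-moved) window base instead of the stored region base. A
rough, illustration-only sketch of the underlying "move window, access,
restore" idea, with all names hypothetical:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct my_region {
	void __iomem *bar_va;	/* ioremapped BAR */
	u64 window_base;	/* device address the BAR window currently starts at */
	u64 window_size;	/* size of the window the BAR exposes */
};

/* Pretend h/w call that moves the window and returns the previous base */
static u64 my_set_window_base(struct my_region *r, u64 new_base)
{
	u64 old_base = r->window_base;

	r->window_base = new_base;
	return old_base;
}

static u32 my_read32(struct my_region *r, u64 addr)
{
	u64 aligned_base = rounddown(addr, r->window_size);
	u64 old_base;
	u32 val;

	/* Move the window so 'addr' falls inside it */
	old_base = my_set_window_base(r, aligned_base);

	val = readl(r->bar_va + (addr - aligned_base));

	/* Restore the previous window for other users of the BAR */
	my_set_window_base(r, old_base);

	return val;
}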
@ -134,6 +140,9 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
dma_addr_t dma_handle, enum dma_alloc_type alloc_type, dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
const char *caller) const char *caller)
{ {
/* this is needed to avoid warning on using freed pointer */
u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
switch (alloc_type) { switch (alloc_type) {
case DMA_ALLOC_COHERENT: case DMA_ALLOC_COHERENT:
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle); hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
@ -146,7 +155,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
break; break;
} }
trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller); trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
} }
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
@ -279,7 +288,7 @@ int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
case PCI_REGION_SRAM: case PCI_REGION_SRAM:
case PCI_REGION_DRAM: case PCI_REGION_DRAM:
return hl_access_sram_dram_region(hdev, addr, val, acc_type, return hl_access_sram_dram_region(hdev, addr, val, acc_type,
region_type); region_type, (region_type == PCI_REGION_DRAM));
default: default:
return -EFAULT; return -EFAULT;
} }
@ -355,10 +364,49 @@ bool hl_device_operational(struct hl_device *hdev,
} }
} }
bool hl_ctrl_device_operational(struct hl_device *hdev,
enum hl_device_status *status)
{
enum hl_device_status current_status;
current_status = hl_device_status(hdev);
if (status)
*status = current_status;
switch (current_status) {
case HL_DEVICE_STATUS_MALFUNCTION:
return false;
case HL_DEVICE_STATUS_IN_RESET:
case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
case HL_DEVICE_STATUS_NEEDS_RESET:
case HL_DEVICE_STATUS_OPERATIONAL:
case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
default:
return true;
}
}
static void print_idle_status_mask(struct hl_device *hdev, const char *message,
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
{
u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {};
BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4);
pad_width[3] = idle_mask[3] ? 16 : 0;
pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0;
pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0;
pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0;
dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n",
message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2],
pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]);
}
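
print_idle_status_mask() above prints the busy-engines mask as four 64-bit
words; a word is printed 16 hex digits wide if it, or any higher word, is
non-zero, while completely unused high words collapse to a single 0. The same
printf formatting trick, shown as a tiny userspace program:

#include <stdio.h>
#include <stdint.h>

static void print_mask(const char *msg, const uint64_t m[4])
{
	int pad[4];

	/* A word gets 16-digit padding if it, or any higher word, is non-zero */
	pad[3] = m[3] ? 16 : 0;
	pad[2] = (m[2] || pad[3]) ? 16 : 0;
	pad[1] = (m[1] || pad[2]) ? 16 : 0;
	pad[0] = (m[0] || pad[1]) ? 16 : 0;

	printf("%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n", msg,
	       pad[3], (unsigned long long)m[3], pad[2], (unsigned long long)m[2],
	       pad[1], (unsigned long long)m[1], pad[0], (unsigned long long)m[0]);
}

int main(void)
{
	const uint64_t mask[4] = { 0x3, 0, 0, 0 };

	/* Prints: device is not idle (mask 0_0_0_0000000000000003) */
	print_mask("device is not idle", mask);
	return 0;
}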
static void hpriv_release(struct kref *ref) static void hpriv_release(struct kref *ref)
{ {
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
bool device_is_idle = true; bool reset_device, device_is_idle = true;
struct hl_fpriv *hpriv; struct hl_fpriv *hpriv;
struct hl_device *hdev; struct hl_device *hdev;
@ -375,15 +423,19 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->ctx_lock); mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex); mutex_destroy(&hpriv->restore_phase_mutex);
if ((!hdev->pldm) && (hdev->pdev) && /* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
(!hdev->asic_funcs->is_device_idle(hdev, * reset that waits for device release.
idle_mask, */
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) { reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
dev_err(hdev->dev,
"device not idle after user context is closed (0x%llx_%llx)\n",
idle_mask[1], idle_mask[0]);
device_is_idle = false; /* Unless device is reset in any case, check idle status and reset if device is not idle */
if (!reset_device && hdev->pdev && !hdev->pldm)
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
if (!device_is_idle) {
print_idle_status_mask(hdev, "device is not idle after user context is closed",
idle_mask);
reset_device = true;
} }
/* We need to remove the user from the list to make sure the reset process won't /* We need to remove the user from the list to make sure the reset process won't
@ -399,9 +451,10 @@ static void hpriv_release(struct kref *ref)
list_del(&hpriv->dev_node); list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock); mutex_unlock(&hdev->fpriv_list_lock);
if (!device_is_idle || hdev->reset_upon_device_release) { if (reset_device) {
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE); hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
} else { } else {
/* Scrubbing is handled within hl_device_reset(), so here need to do it directly */
int rc = hdev->asic_funcs->scrub_device_mem(hdev); int rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc) if (rc)
@ -468,9 +521,10 @@ static int hl_device_release(struct inode *inode, struct file *filp)
hdev->compute_ctx_in_release = 1; hdev->compute_ctx_in_release = 1;
if (!hl_hpriv_put(hpriv)) if (!hl_hpriv_put(hpriv)) {
dev_notice(hdev->dev, dev_notice(hdev->dev, "User process closed FD but device still in use\n");
"User process closed FD but device still in use\n"); hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
hdev->last_open_session_duration_jif = hdev->last_open_session_duration_jif =
jiffies - hdev->last_successful_open_jif; jiffies - hdev->last_successful_open_jif;
@ -658,17 +712,42 @@ static void device_hard_reset_pending(struct work_struct *work)
flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR; flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
rc = hl_device_reset(hdev, flags); rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
dev_info(hdev->dev,
"Could not reset device. will try again in %u seconds",
HL_PENDING_RESET_PER_SEC);
queue_delayed_work(device_reset_work->wq, if ((rc == -EBUSY) && !hdev->device_fini_pending) {
&device_reset_work->reset_work, struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
if (ctx) {
/* The read refcount value should subtracted by one, because the read is
* protected with hl_get_compute_ctx().
*/
dev_info(hdev->dev,
"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
hl_ctx_put(ctx);
} else {
dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
HL_PENDING_RESET_PER_SEC);
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
} }
} }
static void device_release_watchdog_func(struct work_struct *work)
{
struct hl_device_reset_work *device_release_watchdog_work =
container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = device_release_watchdog_work->hdev;
u32 flags;
dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n");
flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR;
hl_device_reset(hdev, flags);
}
/* /*
* device_early_init - do some early initialization for the habanalabs device * device_early_init - do some early initialization for the habanalabs device
* *
@ -699,9 +778,10 @@ static int device_early_init(struct hl_device *hdev)
gaudi2_set_asic_funcs(hdev); gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name)); strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
break; break;
case ASIC_GAUDI2_SEC: case ASIC_GAUDI2B:
gaudi2_set_asic_funcs(hdev); gaudi2_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name)); strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
break;
break; break;
default: default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n", dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@ -737,7 +817,7 @@ static int device_early_init(struct hl_device *hdev)
} }
} }
hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0); hdev->eq_wq = create_singlethread_workqueue("hl-events");
if (hdev->eq_wq == NULL) { if (hdev->eq_wq == NULL) {
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n"); dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
rc = -ENOMEM; rc = -ENOMEM;
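
The hunk above moves the event queue from an unbound workqueue to a
single-threaded one, matching the "handled in a single-threaded workqueue" item
in the summary: event work items now run one at a time, in queueing order. A
minimal sketch of the difference (evt_wq is a hypothetical name):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *evt_wq;

static int evt_wq_create(void)
{
	/*
	 * Before: an unbound workqueue, so queued event work items could run
	 * concurrently on several CPUs.
	 *
	 *	evt_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
	 *
	 * After: a single-threaded (ordered) workqueue, so at most one event
	 * work item runs at a time, in queueing order.
	 */
	evt_wq = create_singlethread_workqueue("hl-events");

	return evt_wq ? 0 : -ENOMEM;
}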
@ -760,8 +840,8 @@ static int device_early_init(struct hl_device *hdev)
goto free_cs_cmplt_wq; goto free_cs_cmplt_wq;
} }
hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0); hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
if (!hdev->pf_wq) { if (!hdev->prefetch_wq) {
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n"); dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
rc = -ENOMEM; rc = -ENOMEM;
goto free_ts_free_wq; goto free_ts_free_wq;
@ -771,7 +851,7 @@ static int device_early_init(struct hl_device *hdev)
GFP_KERNEL); GFP_KERNEL);
if (!hdev->hl_chip_info) { if (!hdev->hl_chip_info) {
rc = -ENOMEM; rc = -ENOMEM;
goto free_pf_wq; goto free_prefetch_wq;
} }
rc = hl_mmu_if_set_funcs(hdev); rc = hl_mmu_if_set_funcs(hdev);
@ -780,19 +860,21 @@ static int device_early_init(struct hl_device *hdev)
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr); hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
hdev->device_reset_work.wq = hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
create_singlethread_workqueue("hl_device_reset"); if (!hdev->reset_wq) {
if (!hdev->device_reset_work.wq) {
rc = -ENOMEM; rc = -ENOMEM;
dev_err(hdev->dev, "Failed to create device reset WQ\n"); dev_err(hdev->dev, "Failed to create device reset WQ\n");
goto free_cb_mgr; goto free_cb_mgr;
} }
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
device_hard_reset_pending);
hdev->device_reset_work.hdev = hdev; hdev->device_reset_work.hdev = hdev;
hdev->device_fini_pending = 0; hdev->device_fini_pending = 0;
INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
device_release_watchdog_func);
hdev->device_release_watchdog_work.hdev = hdev;
mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock); mutex_init(&hdev->debug_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list); INIT_LIST_HEAD(&hdev->cs_mirror_list);
@ -810,8 +892,8 @@ free_cb_mgr:
hl_mem_mgr_fini(&hdev->kernel_mem_mgr); hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
free_chip_info: free_chip_info:
kfree(hdev->hl_chip_info); kfree(hdev->hl_chip_info);
free_pf_wq: free_prefetch_wq:
destroy_workqueue(hdev->pf_wq); destroy_workqueue(hdev->prefetch_wq);
free_ts_free_wq: free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq); destroy_workqueue(hdev->ts_free_obj_wq);
free_cs_cmplt_wq: free_cs_cmplt_wq:
@ -854,11 +936,11 @@ static void device_early_fini(struct hl_device *hdev)
kfree(hdev->hl_chip_info); kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->pf_wq); destroy_workqueue(hdev->prefetch_wq);
destroy_workqueue(hdev->ts_free_obj_wq); destroy_workqueue(hdev->ts_free_obj_wq);
destroy_workqueue(hdev->cs_cmplt_wq); destroy_workqueue(hdev->cs_cmplt_wq);
destroy_workqueue(hdev->eq_wq); destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq); destroy_workqueue(hdev->reset_wq);
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
destroy_workqueue(hdev->cq_wq[i]); destroy_workqueue(hdev->cq_wq[i]);
@ -962,11 +1044,16 @@ static void device_late_fini(struct hl_device *hdev)
int hl_device_utilization(struct hl_device *hdev, u32 *utilization) int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{ {
u64 max_power, curr_power, dc_power, dividend; u64 max_power, curr_power, dc_power, dividend, divisor;
int rc; int rc;
max_power = hdev->max_power; max_power = hdev->max_power;
dc_power = hdev->asic_prop.dc_power_default; dc_power = hdev->asic_prop.dc_power_default;
divisor = max_power - dc_power;
if (!divisor) {
dev_warn(hdev->dev, "device utilization is not supported\n");
return -EOPNOTSUPP;
}
rc = hl_fw_cpucp_power_get(hdev, &curr_power); rc = hl_fw_cpucp_power_get(hdev, &curr_power);
if (rc) if (rc)
@ -975,7 +1062,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
curr_power = clamp(curr_power, dc_power, max_power); curr_power = clamp(curr_power, dc_power, max_power);
dividend = (curr_power - dc_power) * 100; dividend = (curr_power - dc_power) * 100;
*utilization = (u32) div_u64(dividend, (max_power - dc_power)); *utilization = (u32) div_u64(dividend, divisor);
return 0; return 0;
} }
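
The utilization fix above adds a guard for the degenerate case where max_power
equals dc_power, which previously meant dividing by zero. With made-up numbers,
dc_power = 100 W, max_power = 300 W and curr_power = 250 W give
utilization = (250 - 100) * 100 / (300 - 100) = 75%. A hedged restatement of
the computation as a standalone helper:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/minmax.h>

static int compute_utilization(u64 curr_power, u64 dc_power, u64 max_power, u32 *util)
{
	u64 divisor = max_power - dc_power;

	/* The new guard: utilization is meaningless when max equals dc power */
	if (!divisor)
		return -EOPNOTSUPP;

	curr_power = clamp(curr_power, dc_power, max_power);
	*util = (u32)div_u64((curr_power - dc_power) * 100, divisor);

	return 0;
}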
@ -1053,7 +1140,7 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
hl_cs_rollback_all(hdev, skip_wq_flush); hl_cs_rollback_all(hdev, skip_wq_flush);
/* flush the MMU prefetch workqueue */ /* flush the MMU prefetch workqueue */
flush_workqueue(hdev->pf_wq); flush_workqueue(hdev->prefetch_wq);
/* Release all pending user interrupts, each pending user interrupt /* Release all pending user interrupts, each pending user interrupt
* holds a reference to user context * holds a reference to user context
@ -1264,6 +1351,10 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{ {
u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT; u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
/* No consecutive mechanism when user context exists */
if (hdev->is_compute_ctx_active)
return;
/* /*
* 'reset cause' is being updated here, because getting here * 'reset cause' is being updated here, because getting here
* means that it's the 1st time and the last time we're here * means that it's the 1st time and the last time we're here
@ -1337,8 +1428,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
int hl_device_reset(struct hl_device *hdev, u32 flags) int hl_device_reset(struct hl_device *hdev, u32 flags)
{ {
bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false, bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
reset_upon_device_release = false, schedule_hard_reset = false, reset_upon_device_release = false, schedule_hard_reset = false, delay_reset,
skip_wq_flush, delay_reset; from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0}; u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx; struct hl_ctx *ctx;
int i, rc; int i, rc;
@ -1351,8 +1442,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
hard_reset = !!(flags & HL_DRV_RESET_HARD); hard_reset = !!(flags & HL_DRV_RESET_HARD);
from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR); from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW); fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE); from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY); delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) { if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
hard_instead_soft = true; hard_instead_soft = true;
@ -1409,6 +1501,23 @@ do_reset:
spin_unlock(&hdev->reset_info.lock); spin_unlock(&hdev->reset_info.lock);
/* Cancel the device release watchdog work if required.
* In case of reset-upon-device-release while the release watchdog work is
* scheduled, do hard-reset instead of compute-reset.
*/
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
hdev->reset_info.watchdog_active = 0;
if (!from_watchdog_thread)
cancel_delayed_work_sync(
&hdev->device_release_watchdog_work.reset_work);
if (from_dev_release) {
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
}
}
if (delay_reset) if (delay_reset)
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1); usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
@ -1439,13 +1548,12 @@ again:
* Because the reset function can't run from heartbeat work, * Because the reset function can't run from heartbeat work,
* we need to call the reset function from a dedicated work. * we need to call the reset function from a dedicated work.
*/ */
queue_delayed_work(hdev->device_reset_work.wq, queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
&hdev->device_reset_work.reset_work, 0);
return 0; return 0;
} }
cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush); cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
kill_processes: kill_processes:
if (hard_reset) { if (hard_reset) {
@ -1582,8 +1690,7 @@ kill_processes:
/* If device is not idle fail the reset process */ /* If device is not idle fail the reset process */
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask, if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) { HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n", print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
idle_mask[1], idle_mask[0]);
rc = -EIO; rc = -EIO;
goto out_err; goto out_err;
} }
@ -1658,9 +1765,9 @@ kill_processes:
* the device will be operational although it shouldn't be * the device will be operational although it shouldn't be
*/ */
hdev->asic_funcs->enable_events_from_fw(hdev); hdev->asic_funcs->enable_events_from_fw(hdev);
} else if (!reset_upon_device_release) { } else {
if (!reset_upon_device_release)
hdev->reset_info.compute_reset_cnt++; hdev->reset_info.compute_reset_cnt++;
}
if (schedule_hard_reset) { if (schedule_hard_reset) {
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n"); dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
@ -1671,6 +1778,7 @@ kill_processes:
handle_reset_trigger(hdev, flags); handle_reset_trigger(hdev, flags);
goto again; goto again;
} }
}
return 0; return 0;
@ -1706,6 +1814,73 @@ out_err:
return rc; return rc;
} }
/*
* hl_device_cond_reset() - conditionally reset the device.
* @hdev: pointer to habanalabs device structure.
* @reset_flags: reset flags.
* @event_mask: events to notify user about.
*
* Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
* unless another reset precedes it.
*/
int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
{
struct hl_ctx *ctx = NULL;
/* Device release watchdog is only for hard reset */
if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset)
goto device_reset;
/* F/W reset cannot be postponed */
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
goto device_reset;
/* Device release watchdog is relevant only if user exists and gets a reset notification */
if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
goto device_reset;
}
ctx = hl_get_compute_ctx(hdev);
if (!ctx || !ctx->hpriv->notifier_event.eventfd)
goto device_reset;
/* Schedule the device release watchdog work unless reset is already in progress or if the
* work is already scheduled.
*/
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
spin_unlock(&hdev->reset_info.lock);
goto device_reset;
}
if (hdev->reset_info.watchdog_active)
goto out;
hdev->device_release_watchdog_work.flags = flags;
dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
hdev->reset_info.watchdog_active = 1;
out:
spin_unlock(&hdev->reset_info.lock);
hl_notifier_event_send_all(hdev, event_mask);
hl_ctx_put(ctx);
return 0;
device_reset:
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
if (ctx)
hl_ctx_put(ctx);
return hl_device_reset(hdev, flags);
}
static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask) static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
{ {
mutex_lock(&notifier_event->lock); mutex_lock(&notifier_event->lock);
@ -1728,6 +1903,11 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
{ {
struct hl_fpriv *hpriv; struct hl_fpriv *hpriv;
if (!event_mask) {
dev_warn(hdev->dev, "Skip sending zero event");
return;
}
mutex_lock(&hdev->fpriv_list_lock); mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
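
hl_notifier_event_send_all() above now refuses to send an empty event mask. As
a simplified sketch of the eventfd-based notification path, not the driver's
exact structures (struct my_notifier is hypothetical), the general shape is:
accumulate the event bits under a lock and signal the registered eventfd so a
polling monitor process wakes up and queries which events occurred:

#include <linux/eventfd.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_notifier {
	struct mutex lock;
	u64 events_mask;		/* accumulated, fetched later by the user */
	struct eventfd_ctx *eventfd;	/* registered by the monitoring process */
};

static void my_notifier_send(struct my_notifier *n, u64 event_mask)
{
	if (!event_mask)		/* nothing to report, don't wake anyone */
		return;

	mutex_lock(&n->lock);
	n->events_mask |= event_mask;
	if (n->eventfd)
		eventfd_signal(n->eventfd, 1);	/* wakes poll()/read() on the fd */
	mutex_unlock(&n->lock);
}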
@ -1898,6 +2078,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->asic_funcs->state_dump_init(hdev); hdev->asic_funcs->state_dump_init(hdev);
hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL; hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
hl_debugfs_add_device(hdev); hl_debugfs_add_device(hdev);
@ -2118,6 +2300,8 @@ void hl_device_fini(struct hl_device *hdev)
} }
} }
cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
/* Disable PCI access from device F/W so it won't send us additional /* Disable PCI access from device F/W so it won't send us additional
* interrupts. We disable MSI/MSI-X at the halt_engines function and we * interrupts. We disable MSI/MSI-X at the halt_engines function and we
* can't have the F/W sending us interrupts after that. We need to * can't have the F/W sending us interrupts after that. We need to
@ -2144,14 +2328,16 @@ void hl_device_fini(struct hl_device *hdev)
*/ */
dev_info(hdev->dev, dev_info(hdev->dev,
"Waiting for all processes to exit (timeout of %u seconds)", "Waiting for all processes to exit (timeout of %u seconds)",
HL_PENDING_RESET_LONG_SEC); HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false); hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
if (rc) { if (rc) {
dev_crit(hdev->dev, "Failed to kill all open processes\n"); dev_crit(hdev->dev, "Failed to kill all open processes\n");
device_disable_open_processes(hdev, false); device_disable_open_processes(hdev, false);
} }
hdev->process_kill_trial_cnt = 0;
rc = device_kill_open_processes(hdev, 0, true); rc = device_kill_open_processes(hdev, 0, true);
if (rc) { if (rc) {
dev_crit(hdev->dev, "Failed to kill all control device open processes\n"); dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
@ -2177,6 +2363,8 @@ void hl_device_fini(struct hl_device *hdev)
hl_mmu_fini(hdev); hl_mmu_fini(hdev);
vfree(hdev->captured_err_info.pgf_info.user_mappings);
hl_eq_fini(hdev, &hdev->event_queue); hl_eq_fini(hdev, &hdev->event_queue);
kfree(hdev->shadow_cs_queue); kfree(hdev->shadow_cs_queue);
@ -2231,3 +2419,117 @@ inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{ {
writel(val, hdev->rmmio + reg); writel(val, hdev->rmmio + reg);
} }
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags)
{
if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
dev_err(hdev->dev,
"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
return;
}
/* In case it's the first razwi since the device was opened, capture its parameters */
if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1))
return;
hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get());
hdev->captured_err_info.razwi.addr = addr;
hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines;
memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0],
num_of_engines * sizeof(u16));
hdev->captured_err_info.razwi.flags = flags;
}
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags, u64 *event_mask)
{
hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
}
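
The RAZWI capture above uses atomic_cmpxchg() as a one-shot latch, so only the
first (root-cause) error since the device was opened is recorded. A minimal
sketch of that latch with a hypothetical record structure:

#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct my_err_record {
	atomic_t recorded;	/* 0 until the first error has been captured */
	u64 timestamp;
	u64 addr;
};

static void my_capture_first_error(struct my_err_record *rec, u64 addr)
{
	/*
	 * atomic_cmpxchg() returns the previous value: if it was already 1,
	 * an earlier error owns the record, so keep it; only the root cause
	 * is preserved for later inspection.
	 */
	if (atomic_cmpxchg(&rec->recorded, 0, 1))
		return;

	rec->timestamp = ktime_to_ns(ktime_get());
	rec->addr = addr;
}

Re-arming the latch, e.g. with atomic_set(&rec->recorded, 0) when a new user
opens the device, is what scopes the capture to the first error per session.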
static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
{
struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
enum vm_type *vm_type;
struct hl_ctx *ctx;
u32 map_idx = 0;
int i;
/* Reset previous session count*/
pgf_info->num_of_user_mappings = 0;
ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "Can't get user context for user mappings\n");
return;
}
mutex_lock(&ctx->mem_hash_lock);
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
pgf_info->num_of_user_mappings++;
}
if (!pgf_info->num_of_user_mappings)
goto finish;
/* In case we already allocated in previous session, need to release it before
* allocating new buffer.
*/
vfree(pgf_info->user_mappings);
pgf_info->user_mappings =
vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
if (!pgf_info->user_mappings) {
pgf_info->num_of_user_mappings = 0;
goto finish;
}
hash_for_each(ctx->mem_hash, i, hnode, node) {
vm_type = hnode->ptr;
if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
userptr = hnode->ptr;
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
pgf_info->user_mappings[map_idx].size = userptr->size;
map_idx++;
} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
phys_pg_pack = hnode->ptr;
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
map_idx++;
}
}
finish:
mutex_unlock(&ctx->mem_hash_lock);
hl_ctx_put(ctx);
}
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
{
/* Capture only the first page fault */
if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1))
return;
hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get());
hdev->captured_err_info.pgf_info.pgf.addr = addr;
hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id;
hl_capture_user_mappings(hdev, is_pmmu);
}
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask)
{
hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
if (event_mask)
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}

@ -12,6 +12,7 @@
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/vmalloc.h>
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
@ -323,6 +324,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
if (!prop->supports_advanced_cpucp_rc) { if (!prop->supports_advanced_cpucp_rc) {
dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode); dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
rc = -EIO;
goto scrub_descriptor; goto scrub_descriptor;
} }
@ -615,16 +617,12 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED) if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val); dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
/* All warnings should go here in order not to reach the unknown error validation */
if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) { if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
dev_warn(hdev->dev, dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
"Device boot warning - EEPROM failure detected, default settings applied\n"); err_exists = true;
/* This is a warning so we don't want it to disable the
* device
*/
err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
} }
/* All warnings should go here in order not to reach the unknown error validation */
if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) { if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
dev_warn(hdev->dev, dev_warn(hdev->dev,
"Device boot warning - Skipped DRAM initialization\n"); "Device boot warning - Skipped DRAM initialization\n");
@ -1782,6 +1780,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
/* first send clear command to clean former commands */ /* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader); rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
return rc;
/* send the actual command */ /* send the actual command */
hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size); hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
@ -1988,10 +1988,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader) struct fw_load_mgr *fw_loader)
{ {
struct lkd_fw_comms_desc *fw_desc; struct lkd_fw_comms_desc *fw_desc;
void __iomem *src, *temp_fw_desc;
struct pci_mem_region *region; struct pci_mem_region *region;
struct fw_response *response; struct fw_response *response;
u16 fw_data_size;
enum pci_region region_id; enum pci_region region_id;
void __iomem *src;
int rc; int rc;
fw_desc = &fw_loader->dynamic_loader.comm_desc; fw_desc = &fw_loader->dynamic_loader.comm_desc;
@ -2018,9 +2019,29 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
fw_loader->dynamic_loader.fw_desc_valid = false; fw_loader->dynamic_loader.fw_desc_valid = false;
src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar + src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
response->ram_offset; response->ram_offset;
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc); /*
* We do the copy of the fw descriptor in 2 phases:
* 1. copy the header + data info according to our lkd_fw_comms_desc definition.
* then we're able to read the actual data size provided by fw.
* this is needed for cases where data in descriptor was changed(add/remove)
* in embedded specs header file before updating lkd copy of the header file
* 2. copy descriptor to temporary buffer with aligned size and send it to validation
*/
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
fw_data_size = le16_to_cpu(fw_desc->header.size);
temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
if (!temp_fw_desc)
return -ENOMEM;
memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
vfree(temp_fw_desc);
return rc;
} }
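
The descriptor-read change above copies the firmware descriptor in two phases,
so the driver copes with a descriptor whose payload size differs from the
driver's own header definition: read the fixed header first to learn the size
the firmware wrote, then copy header plus payload into a buffer of exactly that
size for validation. A rough sketch of the pattern with an illustrative header
layout:

#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative header layout; the real lkd_fw_comms_desc header differs */
struct my_desc_header {
	__le16 size;		/* payload size the firmware actually wrote */
	__le16 version;
};

static void *read_fw_descriptor(void __iomem *src, u16 *payload_size)
{
	struct my_desc_header hdr;
	void *buf;

	/* Phase 1: read only the fixed-size header to learn the payload size */
	memcpy_fromio(&hdr, src, sizeof(hdr));
	*payload_size = le16_to_cpu(hdr.size);

	/* Phase 2: copy header + payload into a buffer of exactly that size */
	buf = vzalloc(sizeof(hdr) + *payload_size);
	if (!buf)
		return NULL;

	memcpy_fromio(buf, src, sizeof(hdr) + *payload_size);

	return buf;	/* the caller validates it and then vfree()s it */
}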
/** /**
@ -2507,7 +2528,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
struct fw_load_mgr *fw_loader) struct fw_load_mgr *fw_loader)
{ {
struct cpu_dyn_regs *dyn_regs; struct cpu_dyn_regs *dyn_regs;
int rc; int rc, fw_error_rc;
dev_info(hdev->dev, dev_info(hdev->dev,
"Loading %sfirmware to device, may take some time...\n", "Loading %sfirmware to device, may take some time...\n",
@ -2607,14 +2628,17 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
hl_fw_dynamic_update_linux_interrupt_if(hdev); hl_fw_dynamic_update_linux_interrupt_if(hdev);
return 0;
protocol_err: protocol_err:
if (fw_loader->dynamic_loader.fw_desc_valid) if (fw_loader->dynamic_loader.fw_desc_valid) {
fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0), fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
le32_to_cpu(dyn_regs->cpu_boot_err1), le32_to_cpu(dyn_regs->cpu_boot_err1),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0), le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1)); le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
if (fw_error_rc)
return fw_error_rc;
}
return rc; return rc;
} }
@ -2983,7 +3007,7 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
int rc; int rc;
req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr); req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
if (!data) { if (!req_cpu_addr) {
dev_err(hdev->dev, dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id); "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
return -ENOMEM; return -ENOMEM;

@ -53,6 +53,11 @@ struct hl_fpriv;
#define HL_PENDING_RESET_PER_SEC 10 #define HL_PENDING_RESET_PER_SEC 10
#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */ #define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */
#define HL_PENDING_RESET_LONG_SEC 60 #define HL_PENDING_RESET_LONG_SEC 60
/*
* In device fini, wait 10 minutes for user processes to be terminated after we kill them.
* This is needed to prevent situation of clearing resources while user processes are still alive.
*/
#define HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI 600
#define HL_HARD_RESET_MAX_TIMEOUT 120 #define HL_HARD_RESET_MAX_TIMEOUT 120
#define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3) #define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3)
@ -191,6 +196,9 @@ enum hl_mmu_enablement {
* *
* - HL_DRV_RESET_DELAY * - HL_DRV_RESET_DELAY
* Set if a delay should be added before the reset * Set if a delay should be added before the reset
*
* - HL_DRV_RESET_FROM_WD_THR
* Set if the caller is the device release watchdog thread
*/ */
#define HL_DRV_RESET_HARD (1 << 0) #define HL_DRV_RESET_HARD (1 << 0)
@ -201,6 +209,7 @@ enum hl_mmu_enablement {
#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5) #define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6) #define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
#define HL_DRV_RESET_DELAY (1 << 7) #define HL_DRV_RESET_DELAY (1 << 7)
#define HL_DRV_RESET_FROM_WD_THR (1 << 8)
/* /*
* Security * Security
@ -1188,7 +1197,7 @@ struct hl_dec {
* @ASIC_GAUDI: Gaudi device (HL-2000). * @ASIC_GAUDI: Gaudi device (HL-2000).
* @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000). * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
* @ASIC_GAUDI2: Gaudi2 device. * @ASIC_GAUDI2: Gaudi2 device.
* @ASIC_GAUDI2_SEC: Gaudi2 secured device. * @ASIC_GAUDI2B: Gaudi2B device.
*/ */
enum hl_asic_type { enum hl_asic_type {
ASIC_INVALID, ASIC_INVALID,
@ -1196,7 +1205,7 @@ enum hl_asic_type {
ASIC_GAUDI, ASIC_GAUDI,
ASIC_GAUDI_SEC, ASIC_GAUDI_SEC,
ASIC_GAUDI2, ASIC_GAUDI2,
ASIC_GAUDI2_SEC, ASIC_GAUDI2B,
}; };
struct hl_cs_parser; struct hl_cs_parser;
@ -2489,13 +2498,9 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and) #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define RMWREG32(reg, val, mask) \ #define RMWREG32_SHIFTED(reg, val, mask) WREG32_P(reg, val, ~(mask))
do { \
u32 tmp_ = RREG32(reg); \ #define RMWREG32(reg, val, mask) RMWREG32_SHIFTED(reg, (val) << __ffs(mask), mask)
tmp_ &= ~(mask); \
tmp_ |= ((val) << __ffs(mask)); \
WREG32(reg, tmp_); \
} while (0)
#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask)) #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
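
RMWREG32() is reworked above into a thin wrapper around the new
RMWREG32_SHIFTED(), which in turn is expressed through WREG32_P(): clear the
masked field, then OR in the (already shifted) value. The bit arithmetic
itself, demonstrated as a small userspace program with __builtin_ctz standing
in for the kernel's __ffs():

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel helpers, for illustration only */
#define FFS(mask)			__builtin_ctz(mask)
#define RMW_SHIFTED(cur, val, mask)	(((cur) & ~(mask)) | ((val) & (mask)))
#define RMW(cur, val, mask)		RMW_SHIFTED(cur, (uint32_t)(val) << FFS(mask), mask)

int main(void)
{
	uint32_t reg = 0xAAAA5555;

	/* Write the value 0x3 into the field covered by mask bits [11:8] */
	reg = RMW(reg, 0x3, 0x00000F00);

	printf("0x%08X\n", reg);	/* prints 0xAAAA5355 */
	return 0;
}

RMWREG32() shifts the value into the field using the lowest set bit of the
mask, while RMWREG32_SHIFTED() expects a value that is already in position.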
@ -2528,7 +2533,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \ break; \
(val) = __elbi_read; \ (val) = __elbi_read; \
} else {\ } else {\
(val) = RREG32((u32)(addr)); \ (val) = RREG32(lower_32_bits(addr)); \
} \ } \
if (cond) \ if (cond) \
break; \ break; \
@ -2539,7 +2544,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \ break; \
(val) = __elbi_read; \ (val) = __elbi_read; \
} else {\ } else {\
(val) = RREG32((u32)(addr)); \ (val) = RREG32(lower_32_bits(addr)); \
} \ } \
break; \ break; \
} \ } \
@ -2594,7 +2599,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
if (__rc) \ if (__rc) \
break; \ break; \
} else { \ } else { \
__read_val = RREG32((u32)(addr_arr)[__arr_idx]); \ __read_val = RREG32(lower_32_bits(addr_arr[__arr_idx])); \
} \ } \
if (__read_val == (expected_val)) \ if (__read_val == (expected_val)) \
__elem_bitmask &= ~BIT_ULL(__arr_idx); \ __elem_bitmask &= ~BIT_ULL(__arr_idx); \
@ -2682,14 +2687,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
struct hwmon_chip_info; struct hwmon_chip_info;
/** /**
* struct hl_device_reset_work - reset workqueue task wrapper. * struct hl_device_reset_work - reset work wrapper.
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
* @flags: reset flags.
*/
struct hl_device_reset_work {
- struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
u32 flags;
@@ -2811,7 +2814,7 @@ struct hl_mmu_funcs {
/**
* struct hl_prefetch_work - prefetch work structure handler
- * @pf_work: actual work struct.
+ * @prefetch_work: actual work struct.
* @ctx: compute context.
* @va: virtual address to pre-fetch.
* @size: pre-fetch size.
@@ -2819,7 +2822,7 @@ struct hl_mmu_funcs {
* @asid: ASID for maintenance operation.
*/
struct hl_prefetch_work {
- struct work_struct pf_work;
+ struct work_struct prefetch_work;
struct hl_ctx *ctx;
u64 va;
u64 size;
@@ -2925,30 +2928,6 @@ struct cs_timeout_info {
u64 seq;
};
- /**
- * struct razwi_info - info about last razwi error occurred.
- * @timestamp: razwi timestamp.
- * @write_enable: if set writing to razwi parameters in the structure is enabled.
- * otherwise - disabled, so the first (root cause) razwi will not be overwritten.
- * @addr: address that caused razwi.
- * @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
- * not have engine id it will be set to U16_MAX.
- * @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
- * engines which one them caused the razwi. In that case, it will contain the
- * second possible engine id, otherwise it will be set to U16_MAX.
- * @non_engine_initiator: in case the initiator of the razwi does not have engine id.
- * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
- */
- struct razwi_info {
- ktime_t timestamp;
- atomic_t write_enable;
- u64 addr;
- u16 engine_id_1;
- u16 engine_id_2;
- u8 non_engine_initiator;
- u8 type;
- };
#define MAX_QMAN_STREAMS_INFO 4
#define OPCODE_INFO_MAX_ADDR_SIZE 8
/**
@@ -2981,16 +2960,38 @@ struct undefined_opcode_info {
bool write_enable;
};
+ /**
+ * struct page_fault_info - info about page fault
+ * @pgf_info: page fault information.
+ * @user_mappings: buffer containing user mappings.
+ * @num_of_user_mappings: number of user mappings.
+ */
+ struct page_fault_info {
+ struct hl_page_fault_info pgf;
+ struct hl_user_mapping *user_mappings;
+ u64 num_of_user_mappings;
+ };
/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
* @razwi: razwi information.
+ * @razwi_info_recorded: if set writing to razwi information is enabled.
+ * otherwise - disabled, so the first (root cause) razwi will not be
+ * overwritten.
* @undef_opcode: undefined opcode information
+ * @pgf_info: page fault information.
+ * @pgf_info_recorded: if set writing to page fault information is enabled.
+ * otherwise - disabled, so the first (root cause) page fault will not be
+ * overwritten.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
- struct razwi_info razwi;
+ struct hl_info_razwi_event razwi;
+ atomic_t razwi_info_recorded;
struct undefined_opcode_info undef_opcode;
+ struct page_fault_info pgf_info;
+ atomic_t pgf_info_recorded;
};
/**
@@ -3013,6 +3014,7 @@ struct hl_error_info {
* same cause.
* @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
* complete instead.
+ * @watchdog_active: true if a device release watchdog work is scheduled.
*/
struct hl_reset_info {
spinlock_t lock;
@@ -3023,12 +3025,11 @@ struct hl_reset_info {
u8 in_compute_reset;
u8 needs_reset;
u8 hard_reset_pending;
u8 curr_reset_cause;
u8 prev_reset_trigger;
u8 reset_trigger_repeated;
u8 skip_reset_on_timeout;
+ u8 watchdog_active;
};
/**
@@ -3044,6 +3045,8 @@ struct hl_reset_info {
* @dev_ctrl: related kernel device structure for the control device
* @work_heartbeat: delayed work for CPU-CP is-alive check.
* @device_reset_work: delayed work which performs hard reset
+ * @device_release_watchdog_work: watchdog work that performs hard reset if user doesn't release
+ * device upon certain error cases.
* @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
@@ -3062,7 +3065,8 @@ struct hl_reset_info {
* @cs_cmplt_wq: work queue of CS completions for executing work in process
* context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
- * @pf_wq: work queue for MMU pre-fetch operations.
+ * @prefetch_wq: work queue for MMU pre-fetch operations.
+ * @reset_wq: work queue for device reset procedure.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
@@ -3152,6 +3156,7 @@ struct hl_reset_info {
* indicates which decoder engines are binned-out
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
+ * @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds.
* @id: device minor.
* @id_control: minor of the control device.
* @cdev_idx: char device index. Used for setting its name.
@@ -3221,6 +3226,7 @@ struct hl_device {
struct device *dev_ctrl;
struct delayed_work work_heartbeat;
struct hl_device_reset_work device_reset_work;
+ struct hl_device_reset_work device_release_watchdog_work;
char asic_name[HL_STR_MAX];
char status[HL_DEV_STS_MAX][HL_STR_MAX];
enum hl_asic_type asic_type;
@@ -3233,7 +3239,8 @@ struct hl_device {
struct workqueue_struct *eq_wq;
struct workqueue_struct *cs_cmplt_wq;
struct workqueue_struct *ts_free_obj_wq;
- struct workqueue_struct *pf_wq;
+ struct workqueue_struct *prefetch_wq;
+ struct workqueue_struct *reset_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
struct list_head cs_mirror_list;
@@ -3314,6 +3321,7 @@ struct hl_device {
u32 high_pll;
u32 decoder_binning;
u32 edma_binning;
+ u32 device_release_watchdog_timeout_sec;
u16 id;
u16 id_control;
u16 cdev_idx;
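Editor's note: to make the relationship between the new fields explicit, here is a minimal, hypothetical sketch (not the driver's literal code) of how the release watchdog could be armed. A delayed work is scheduled for device_release_watchdog_timeout_sec seconds, and its handler performs the hard reset only if the user process has not released the device in the meantime. The helper name is made up for illustration; HL_DRV_RESET_HARD is the driver's existing reset flag.

/* Hypothetical helper, assuming the habanalabs.h definitions above. */
static void arm_device_release_watchdog(struct hl_device *hdev)
{
	u32 timeout_sec = hdev->device_release_watchdog_timeout_sec;

	hdev->device_release_watchdog_work.flags = HL_DRV_RESET_HARD;
	schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
			      msecs_to_jiffies(timeout_sec * 1000));
	hdev->reset_info.watchdog_active = 1;
}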
@@ -3488,6 +3496,8 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
+ int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
+ enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar);
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type);
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
@@ -3496,6 +3506,8 @@ int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
+ bool hl_ctrl_device_operational(struct hl_device *hdev,
+ enum hl_device_status *status);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
int hl_hw_queues_create(struct hl_device *hdev);
@@ -3549,6 +3561,7 @@ void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, u32 flags);
+ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask);
void hl_hpriv_get(struct hl_fpriv *hpriv);
int hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
@@ -3762,7 +3775,8 @@ void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *d
void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
- void hl_encaps_handle_do_release(struct kref *ref);
+ void hl_encaps_release_handle_and_put_ctx(struct kref *ref);
+ void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref);
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs *cs, struct hl_cs_job *job,
struct hl_cs_compl *cs_cmpl);
@@ -3798,6 +3812,13 @@ hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args);
__printf(2, 3) void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...);
+ void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
+ u8 flags);
+ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
+ u8 flags, u64 *event_mask);
+ void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu);
+ void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
+ u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
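Editor's note: the capture/handle pairs declared above suggest a simple split of responsibilities. The following is a sketch under that assumption, not the literal implementation: the capture helper records the data served by the INFO ioctls, while the handle helper additionally marks the corresponding eventfd notification bit.

/* Sketch only; the real code also avoids overwriting the first (root-cause) record. */
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id,
		     u16 num_of_engines, u8 flags, u64 *event_mask)
{
	hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);

	if (event_mask)
		*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
}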


@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt
#include "habanalabs.h"
+ #include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
#include <linux/aer.h>
@@ -74,16 +75,17 @@ MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
*
- * @device: id of the PCI device
+ * @hdev: pointer to habanalabs device structure.
*
- * Translate device id to asic type.
+ * Translate device id and revision id to asic type.
* In case of unidentified device, return -1
*/
- static enum hl_asic_type get_asic_type(u16 device)
+ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
{
- enum hl_asic_type asic_type;
+ struct pci_dev *pdev = hdev->pdev;
+ enum hl_asic_type asic_type = ASIC_INVALID;
- switch (device) {
+ switch (pdev->device) {
case PCI_IDS_GOYA:
asic_type = ASIC_GOYA;
break;
@@ -94,10 +96,18 @@ static enum hl_asic_type get_asic_type(u16 device)
asic_type = ASIC_GAUDI_SEC;
break;
case PCI_IDS_GAUDI2:
+ switch (pdev->revision) {
+ case REV_ID_A:
asic_type = ASIC_GAUDI2;
break;
+ case REV_ID_B:
+ asic_type = ASIC_GAUDI2B;
+ break;
+ default:
+ break;
+ }
+ break;
default:
- asic_type = ASIC_INVALID;
break;
}
@@ -212,7 +222,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
- atomic_set(&hdev->captured_err_info.razwi.write_enable, 1);
+ atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0);
+ atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;
hdev->open_counter++;
@@ -270,9 +281,9 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
mutex_lock(&hdev->fpriv_ctrl_list_lock);
- if (!hl_device_operational(hdev, NULL)) {
+ if (!hl_ctrl_device_operational(hdev, NULL)) {
dev_dbg_ratelimited(hdev->dev_ctrl,
- "Can't open %s because it is disabled or in reset\n",
+ "Can't open %s because it is disabled\n",
dev_name(hdev->dev_ctrl));
rc = -EPERM;
goto out_err;
@@ -415,7 +426,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
/* First, we must find out which ASIC are we handling. This is needed
* to configure the behavior of the driver (kernel parameters)
*/
- hdev->asic_type = get_asic_type(pdev->device);
+ hdev->asic_type = get_asic_type(hdev);
if (hdev->asic_type == ASIC_INVALID) {
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
@@ -594,15 +605,16 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
+ dev_warn(hdev->dev, "PCI normal state error detected\n");
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- dev_warn(hdev->dev, "frozen state error detected\n");
+ dev_warn(hdev->dev, "PCI frozen state error detected\n");
result = PCI_ERS_RESULT_NEED_RESET;
break;
case pci_channel_io_perm_failure:
- dev_warn(hdev->dev, "failure state error detected\n");
+ dev_warn(hdev->dev, "PCI failure state error detected\n");
result = PCI_ERS_RESULT_DISCONNECT;
break;
@@ -638,6 +650,10 @@ static void hl_pci_err_resume(struct pci_dev *pdev)
*/
static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev)
{
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ dev_warn(hdev->dev, "PCI slot reset detected\n");
return PCI_ERS_RESULT_RECOVERED;
}


@@ -10,10 +10,11 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
- #include <linux/kernel.h>
#include <linux/fs.h>
- #include <linux/uaccess.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
#include <linux/slab.h>
+ #include <linux/uaccess.h>
#include <linux/vmalloc.h>
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
@@ -105,6 +106,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
+ hw_ip.revision_id = hdev->pdev->revision;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -121,6 +123,10 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate,
return -EINVAL;
arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
+ if (!arr) {
+ dev_err(hdev->dev, "Events info not supported\n");
+ return -EOPNOTSUPP;
+ }
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
@@ -603,20 +609,14 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
- struct hl_info_razwi_event info = {0};
+ struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->captured_err_info.razwi.timestamp);
- info.addr = hdev->captured_err_info.razwi.addr;
- info.engine_id_1 = hdev->captured_err_info.razwi.engine_id_1;
- info.engine_id_2 = hdev->captured_err_info.razwi.engine_id_2;
- info.no_engine_id = hdev->captured_err_info.razwi.non_engine_initiator;
- info.error_type = hdev->captured_err_info.razwi.type;
- return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+ return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event)))
+ ? -EFAULT : 0;
}
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -784,6 +784,42 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return rc;
}
+ static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+ {
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info)))
+ ? -EFAULT : 0;
+ }
+
+ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+ {
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ u32 user_buf_size = args->return_size;
+ struct hl_device *hdev = hpriv->hdev;
+ struct page_fault_info *pgf_info;
+ u64 actual_size;
+
+ pgf_info = &hdev->captured_err_info.pgf_info;
+ args->array_size = pgf_info->num_of_user_mappings;
+
+ if (!out)
+ return -EINVAL;
+
+ actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
+ if (user_buf_size < actual_size)
+ return -ENOMEM;
+
+ return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size))
+ ? -EFAULT : 0;
+ }
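Editor's note: for context, this is roughly how userspace would consume the two new opcodes. It is a hedged sketch that assumes the HL_IOCTL_INFO entry point from the installed uapi header and the /dev/hl0 compute node; error handling and the exact header path are simplified. The driver fills args.array_size with the number of mappings available.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_page_fault_info pgf = {0};
	struct hl_user_mapping maps[64];	/* assume up to 64 mappings for this sketch */
	struct hl_info_args args = {0};
	int fd = open("/dev/hl0", O_RDWR);

	/* Last captured page fault */
	args.op = HL_INFO_PAGE_FAULT_EVENT;
	args.return_pointer = (uint64_t)(uintptr_t)&pgf;
	args.return_size = sizeof(pgf);
	if (!ioctl(fd, HL_IOCTL_INFO, &args))
		printf("page fault at 0x%llx\n", (unsigned long long)pgf.addr);

	/* User mappings captured together with the page fault */
	args.op = HL_INFO_USER_MAPPINGS;
	args.return_pointer = (uint64_t)(uintptr_t)maps;
	args.return_size = sizeof(maps);
	if (!ioctl(fd, HL_IOCTL_INFO, &args))
		for (uint32_t i = 0; i < args.array_size; i++)
			printf("va 0x%llx size 0x%llx\n",
			       (unsigned long long)maps[i].dev_va,
			       (unsigned long long)maps[i].size);

	close(fd);
	return 0;
}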
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -843,6 +879,15 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_GET_EVENTS:
return events_info(hpriv, args);
+ case HL_INFO_PAGE_FAULT_EVENT:
+ return page_fault_info(hpriv, args);
+
+ case HL_INFO_USER_MAPPINGS:
+ return user_mappings_info(hpriv, args);
+
+ case HL_INFO_UNREGISTER_EVENTFD:
+ return eventfd_unregister(hpriv, args);
+
default:
break;
}
@@ -899,9 +944,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_REGISTER_EVENTFD:
return eventfd_register(hpriv, args);
- case HL_INFO_UNREGISTER_EVENTFD:
- return eventfd_unregister(hpriv, args);
-
case HL_INFO_ENGINE_STATUS:
return engine_status_info(hpriv, args);


@@ -1689,7 +1689,7 @@ static int hl_dmabuf_attach(struct dma_buf *dmabuf,
hl_dmabuf = dmabuf->priv;
hdev = hl_dmabuf->ctx->hdev;
- rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
+ rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
if (rc < 0)
attachment->peer2peer = false;
@@ -2109,7 +2109,7 @@ static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
/* Allocate the internal kernel buffer */
size = num_elements * sizeof(struct hl_user_pending_interrupt);
- p = vmalloc(size);
+ p = vzalloc(size);
if (!p)
goto free_user_buff;
@@ -2508,24 +2508,20 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
/*
* PAGE_SIZE alignment
- * it is the callers responsibility to align the addresses if the
+ * it is the caller's responsibility to align the addresses if the
* page size is not a power of 2
*/
if (is_power_of_2(page_size)) {
- if (start & (PAGE_SIZE - 1)) {
- start &= PAGE_MASK;
- start += PAGE_SIZE;
- }
+ start = round_up(start, page_size);
/*
* The end of the range is inclusive, hence we need to align it
* to the end of the last full page in the range. For example if
* end = 0x3ff5 with page size 0x1000, we need to align it to
- * 0x2fff. The remainig 0xff5 bytes do not form a full page.
+ * 0x2fff. The remaining 0xff5 bytes do not form a full page.
*/
- if ((end + 1) & (PAGE_SIZE - 1))
- end = ((end + 1) & PAGE_MASK) - 1;
+ end = round_down(end + 1, page_size) - 1;
}
if (start >= end) {
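Editor's note: the new round_up()/round_down() form keeps the original inclusive-end semantics. A tiny standalone illustration with simplified macros (the kernel's macros differ in implementation, not in result, for power-of-2 sizes):

#include <assert.h>
#include <stdint.h>

#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))

int main(void)
{
	uint64_t page_size = 0x1000, start = 0x1234, end = 0x3ff5;

	start = round_up(start, page_size);		/* 0x2000 */
	end = round_down(end + 1, page_size) - 1;	/* 0x2fff, matching the comment above */

	assert(start == 0x2000 && end == 0x2fff);
	return 0;
}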


@@ -635,7 +635,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
case ASIC_GAUDI2:
- case ASIC_GAUDI2_SEC:
+ case ASIC_GAUDI2B:
/* MMUs in Gaudi2 are always host resident */
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
@@ -699,7 +699,7 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
- struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
+ struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
struct hl_ctx *ctx = pfw->ctx;
struct hl_device *hdev = ctx->hdev;
@@ -723,25 +723,25 @@ put_ctx:
int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
- struct hl_prefetch_work *handle_pf_work;
+ struct hl_prefetch_work *handle_prefetch_work;
- handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
- if (!handle_pf_work)
+ handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL);
+ if (!handle_prefetch_work)
return -ENOMEM;
- INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
- handle_pf_work->ctx = ctx;
- handle_pf_work->va = va;
- handle_pf_work->size = size;
- handle_pf_work->flags = flags;
- handle_pf_work->asid = asid;
+ INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function);
+ handle_prefetch_work->ctx = ctx;
+ handle_prefetch_work->va = va;
+ handle_prefetch_work->size = size;
+ handle_prefetch_work->flags = flags;
+ handle_prefetch_work->asid = asid;
/*
* as actual prefetch is done in a WQ we must get the context (and put it
* at the end of the work function)
*/
hl_ctx_get(ctx);
- queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
+ queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);
return 0;
}


@@ -248,8 +248,8 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2:
str = "GAUDI2";
break;
- case ASIC_GAUDI2_SEC:
- str = "GAUDI2 SEC";
+ case ASIC_GAUDI2B:
+ str = "GAUDI2B";
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",


@@ -6505,8 +6505,8 @@ event_not_supported:
}
static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
- bool is_write, s32 *engine_id_1,
- s32 *engine_id_2)
+ bool is_write, u16 *engine_id_1,
+ u16 *engine_id_2)
{
u32 dma_id[2], dma_offset, err_cause[2], mask, i;
@@ -6603,7 +6603,7 @@ unknown_initiator:
}
static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
- u32 *engine_id_1, u32 *engine_id_2)
+ u16 *engine_id_1, u16 *engine_id_2)
{
u32 val, x_y, axi_id;
@@ -6719,8 +6719,8 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool i
return "unknown initiator";
}
- static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1,
- u32 *engine_id_2)
+ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u16 *engine_id_1,
+ u16 *engine_id_2, bool *is_read, bool *is_write)
{
if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
@@ -6728,6 +6728,7 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i
"RAZWI event caused by illegal write of %s\n",
gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
+ *is_write = true;
}
if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
@@ -6735,10 +6736,11 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i
"RAZWI event caused by illegal read of %s\n",
gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
+ *is_read = true;
}
}
- static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type)
+ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u64 *event_mask)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 val;
@@ -6753,7 +6755,7 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr
*addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
- *type = HL_RAZWI_PAGE_FAULT;
+ hl_handle_page_fault(hdev, *addr, 0, true, event_mask);
WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
}
@@ -6765,7 +6767,6 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr
*addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
- *type = HL_RAZWI_MMU_ACCESS_ERROR;
WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
}
@@ -7300,48 +7301,44 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
- bool razwi)
+ bool razwi, u64 *event_mask)
{
- u32 engine_id_1, engine_id_2;
+ bool is_read = false, is_write = false;
+ u16 engine_id[2], num_of_razwi_eng = 0;
char desc[64] = "";
u64 razwi_addr = 0;
- u8 razwi_type;
- int rc;
+ u8 razwi_flags = 0;
/*
* Init engine id by default as not valid and only if razwi initiated from engine with
* engine id it will get valid value.
- * Init razwi type to default, will be changed only if razwi caused by page fault of
- * MMU access error
*/
- engine_id_1 = U16_MAX;
- engine_id_2 = U16_MAX;
- razwi_type = U8_MAX;
+ engine_id[0] = HL_RAZWI_NA_ENG_ID;
+ engine_id[1] = HL_RAZWI_NA_ENG_ID;
gaudi_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
- gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2);
- gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
- /* In case it's the first razwi, save its parameters*/
- rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0);
- if (rc) {
- hdev->captured_err_info.razwi.timestamp = ktime_get();
- hdev->captured_err_info.razwi.addr = razwi_addr;
- hdev->captured_err_info.razwi.engine_id_1 = engine_id_1;
- hdev->captured_err_info.razwi.engine_id_2 = engine_id_2;
- /*
- * If first engine id holds non valid value the razwi initiator
- * does not have engine id
- */
- hdev->captured_err_info.razwi.non_engine_initiator =
- (engine_id_1 == U16_MAX);
- hdev->captured_err_info.razwi.type = razwi_type;
- }
+ gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
+ &is_write);
+ gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);
+ if (is_read)
+ razwi_flags |= HL_RAZWI_READ;
+ if (is_write)
+ razwi_flags |= HL_RAZWI_WRITE;
+ if (engine_id[0] != HL_RAZWI_NA_ENG_ID) {
+ if (engine_id[1] != HL_RAZWI_NA_ENG_ID)
+ num_of_razwi_eng = 2;
+ else
+ num_of_razwi_eng = 1;
+ }
+ hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags,
+ event_mask);
}
}
@@ -7350,8 +7347,8 @@ static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
- dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
- sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+ dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
static void gaudi_print_fw_alive_info(struct hl_device *hdev,
@@ -7359,9 +7356,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev,
{
dev_err(hdev->dev,
"FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n",
- (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ?
- "Minor" : "Critical", fw_alive->process_id,
- fw_alive->thread_id, fw_alive->uptime_seconds);
+ (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical",
+ le32_to_cpu(fw_alive->process_id),
+ le32_to_cpu(fw_alive->thread_id),
+ le64_to_cpu(fw_alive->uptime_seconds));
}
static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
@ -7679,7 +7677,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR: case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR: case GAUDI_EVENT_MMU_DERR:
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR: case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
@ -7689,7 +7687,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_AXI_ECC: case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC: case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17: case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR; fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device; goto reset_device;
@ -7698,7 +7696,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM1_SPI_0: case GAUDI_EVENT_HBM1_SPI_0:
case GAUDI_EVENT_HBM2_SPI_0: case GAUDI_EVENT_HBM2_SPI_0:
case GAUDI_EVENT_HBM3_SPI_0: case GAUDI_EVENT_HBM3_SPI_0:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev, gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type), gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data); &eq_entry->hbm_ecc_data);
@ -7710,7 +7708,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM1_SPI_1: case GAUDI_EVENT_HBM1_SPI_1:
case GAUDI_EVENT_HBM2_SPI_1: case GAUDI_EVENT_HBM2_SPI_1:
case GAUDI_EVENT_HBM3_SPI_1: case GAUDI_EVENT_HBM3_SPI_1:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev, gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type), gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data); &eq_entry->hbm_ecc_data);
@ -7732,7 +7730,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
* if the event is a TPC Assertion or a "real" TPC DEC. * if the event is a TPC Assertion or a "real" TPC DEC.
*/ */
event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT; event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev, reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_dec_event_to_tpc_id(event_type), tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error"); "AXI_SLV_DEC_Error");
@ -7757,7 +7755,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC5_KRN_ERR: case GAUDI_EVENT_TPC5_KRN_ERR:
case GAUDI_EVENT_TPC6_KRN_ERR: case GAUDI_EVENT_TPC6_KRN_ERR:
case GAUDI_EVENT_TPC7_KRN_ERR: case GAUDI_EVENT_TPC7_KRN_ERR:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev, reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_krn_event_to_tpc_id(event_type), tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR"); "KRN_ERR");
@ -7796,7 +7794,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR: case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough; fallthrough;
case GAUDI_EVENT_MMU_SERR: case GAUDI_EVENT_MMU_SERR:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
hl_fw_unmask_irq(hdev, event_type); hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
@ -7806,14 +7804,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_CPU_AXI_SPLITTER: case GAUDI_EVENT_CPU_AXI_SPLITTER:
case GAUDI_EVENT_PSOC_AXI_DEC: case GAUDI_EVENT_PSOC_AXI_DEC:
case GAUDI_EVENT_PSOC_PRSTN_FALL: case GAUDI_EVENT_PSOC_PRSTN_FALL:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type); hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break; break;
case GAUDI_EVENT_MMU_PAGE_FAULT: case GAUDI_EVENT_MMU_PAGE_FAULT:
case GAUDI_EVENT_MMU_WR_PERM: case GAUDI_EVENT_MMU_WR_PERM:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type); hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break; break;
@ -7842,14 +7840,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_NIC4_QM1: case GAUDI_EVENT_NIC4_QM1:
case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE: case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM: case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_qman_err(hdev, event_type, &event_mask); gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type); hl_fw_unmask_irq(hdev, event_type);
event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET); event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET);
break; break;
case GAUDI_EVENT_RAZWI_OR_ADC_SW: case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true); gaudi_print_irq_info(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
goto reset_device; goto reset_device;
@ -7862,7 +7860,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC6_BMON_SPMU: case GAUDI_EVENT_TPC6_BMON_SPMU:
case GAUDI_EVENT_TPC7_BMON_SPMU: case GAUDI_EVENT_TPC7_BMON_SPMU:
case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7: case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
hl_fw_unmask_irq(hdev, event_type); hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break; break;
@ -7874,7 +7872,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break; break;
case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3: case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_sm_sei_info(hdev, event_type, gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data); &eq_entry->sm_sei_data);
rc = hl_state_dump(hdev); rc = hl_state_dump(hdev);
@ -7903,18 +7901,18 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break; break;
case GAUDI_EVENT_DEV_RESET_REQ: case GAUDI_EVENT_DEV_RESET_REQ:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device; goto reset_device;
case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC: case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device; goto reset_device;
case GAUDI_EVENT_FW_ALIVE_S: case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false); gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive); gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device; goto reset_device;
@@ -7946,14 +7944,14 @@ reset_device:
reset_required = false;
}
- /* despite reset doesn't execute. a notification on
- * occurred event needs to be sent here
- */
- hl_notifier_event_send_all(hdev, event_mask);
-
- if (reset_required)
- hl_device_reset(hdev, flags);
- else
+ if (reset_required) {
+ hl_device_cond_reset(hdev, flags, event_mask);
+ } else {
hl_fw_unmask_irq(hdev, event_type);
+
+ /* Notification on occurred event needs to be sent although reset is not executed */
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
+ }
}
static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)


@@ -23,8 +23,6 @@
#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
- #define GAUDI2_FPGA_CPU_TIMEOUT 100000000 /* 100s */
#define NUMBER_OF_PDMA_QUEUES 2
#define NUMBER_OF_EDMA_QUEUES 8
#define NUMBER_OF_MME_QUEUES 4


@@ -1764,6 +1764,7 @@ static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = {
{mmNIC0_QM_ARC_AUX0_CLUSTER_NUM, mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT},
{mmNIC0_QM_ARC_AUX0_ARC_RST_REQ, mmNIC0_QM_ARC_AUX0_CID_OFFSET_7},
{mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0, mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT},
+ {mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0, mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG},
{mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT, mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI},


@@ -4475,8 +4475,8 @@ static void goya_print_out_of_sync_info(struct hl_device *hdev,
{
struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
- dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
- sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
+ dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,


@@ -957,6 +957,7 @@ enum gaudi2_async_event_id {
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0 = 1317,
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
+ GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
GAUDI2_EVENT_SIZE,
};


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018-2021 HabanaLabs, Ltd.
+ * Copyright 2018-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -2663,6 +2663,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG1" },
{ .fc_id = 1319, .cpu_id = 625, .valid = 1,
.msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
+ { .fc_id = 1320, .cpu_id = 626, .valid = 1,
+ .msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" },
};
#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */


@@ -20,4 +20,11 @@
#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
PCI_CONFIG_ELBI_STS_DONE)
+ enum hl_revision_id {
+ /* PCI revision ID 0 is not legal */
+ REV_ID_INVALID = 0x00,
+ REV_ID_A = 0x01,
+ REV_ID_B = 0x02,
+ };
#endif /* INCLUDE_PCI_GENERAL_H_ */


@@ -597,6 +597,10 @@ enum gaudi2_engine_id {
GAUDI2_ENGINE_ID_NIC10_1,
GAUDI2_ENGINE_ID_NIC11_0,
GAUDI2_ENGINE_ID_NIC11_1,
+ GAUDI2_ENGINE_ID_PCIE,
+ GAUDI2_ENGINE_ID_PSOC,
+ GAUDI2_ENGINE_ID_ARC_FARM,
+ GAUDI2_ENGINE_ID_KDMA,
GAUDI2_ENGINE_ID_SIZE
};
@@ -717,6 +721,8 @@ enum hl_server_type {
* HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
* HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state
* HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
+ * HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened
+ * HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened
*/
#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
@@ -725,6 +731,8 @@ enum hl_server_type {
#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
#define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5)
#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
+ #define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7)
+ #define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8)
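Editor's note: userspace consumes the new bits through the existing eventfd mechanism. This is a hedged sketch of the flow; the device path, header location and error handling are simplified, and it assumes HL_INFO_GET_EVENTS returns the accumulated notification bitmask, as its opcode description states.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_args args = {0};
	uint64_t events = 0, counter;
	int fd = open("/dev/hl0", O_RDWR);
	int efd = eventfd(0, 0);

	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	ioctl(fd, HL_IOCTL_INFO, &args);

	/* Block until the driver signals at least one notification */
	if (read(efd, &counter, sizeof(counter)) != sizeof(counter))
		return 1;

	args.op = HL_INFO_GET_EVENTS;
	args.return_pointer = (uint64_t)(uintptr_t)&events;
	args.return_size = sizeof(events);
	ioctl(fd, HL_IOCTL_INFO, &args);

	if (events & HL_NOTIFIER_EVENT_RAZWI)
		printf("razwi reported, details via HL_INFO_RAZWI_EVENT\n");
	if (events & HL_NOTIFIER_EVENT_PAGE_FAULT)
		printf("page fault reported, details via HL_INFO_PAGE_FAULT_EVENT\n");
	return 0;
}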
/* Opcode for management ioctl
*
@@ -778,6 +786,9 @@ enum hl_server_type {
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
* HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
+ * HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
+ * HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
+ * HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -809,6 +820,8 @@ enum hl_server_type {
#define HL_INFO_GET_EVENTS 30
#define HL_INFO_UNDEFINED_OPCODE_EVENT 31
#define HL_INFO_ENGINE_STATUS 32
+ #define HL_INFO_PAGE_FAULT_EVENT 33
+ #define HL_INFO_USER_MAPPINGS 34
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -859,6 +872,7 @@ enum hl_server_type {
* @number_of_user_interrupts: The number of interrupts that are available to the userspace
* application to use. Relevant for Gaudi2 and later.
* @device_mem_alloc_default_page_size: default page size used in device memory allocation.
+ * @revision_id: PCI revision ID of the ASIC.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -889,6 +903,12 @@ struct hl_info_hw_ip_info {
__u16 pad2;
__u64 reserved4;
__u64 device_mem_alloc_default_page_size;
+ __u64 reserved5;
+ __u64 reserved6;
+ __u32 reserved7;
+ __u8 reserved8;
+ __u8 revision_id;
+ __u8 pad[2];
};
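Editor's note: the new revision_id field lets a monitoring tool tell a Gaudi2 (PCI revision 1) from a Gaudi2 with PCI revision 2 directly through the INFO ioctl. A minimal hedged sketch, with the same assumptions about the ioctl entry point and device node as the earlier examples:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	struct hl_info_args args = {0};
	int fd = open("/dev/hl0", O_RDWR);

	args.op = HL_INFO_HW_IP_INFO;
	args.return_pointer = (uint64_t)(uintptr_t)&hw_ip;
	args.return_size = sizeof(hw_ip);
	if (!ioctl(fd, HL_IOCTL_INFO, &args))
		printf("PCI revision id: %u\n", hw_ip.revision_id);

	close(fd);
	return 0;
}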
struct hl_info_dram_usage {
@@ -896,7 +916,7 @@ struct hl_info_dram_usage {
__u64 ctx_dram_mem;
};
- #define HL_BUSY_ENGINES_MASK_EXT_SIZE 2
+ #define HL_BUSY_ENGINES_MASK_EXT_SIZE 4
struct hl_info_hw_idle {
__u32 is_idle;
@@ -1071,31 +1091,44 @@ struct hl_info_cs_timeout_event {
__u64 seq;
};
- #define HL_RAZWI_PAGE_FAULT 0
- #define HL_RAZWI_MMU_ACCESS_ERROR 1
+ #define HL_RAZWI_NA_ENG_ID U16_MAX
+ #define HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR 128
+ #define HL_RAZWI_READ BIT(0)
+ #define HL_RAZWI_WRITE BIT(1)
+ #define HL_RAZWI_LBW BIT(2)
+ #define HL_RAZWI_HBW BIT(3)
+ #define HL_RAZWI_RR BIT(4)
+ #define HL_RAZWI_ADDR_DEC BIT(5)
/**
* struct hl_info_razwi_event - razwi information.
* @timestamp: timestamp of razwi.
* @addr: address which accessing it caused razwi.
- * @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does not
- * have engine id it will be set to U16_MAX.
- * @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
- * engines which one them caused the razwi. In that case, it will contain the
- * second possible engine id, otherwise it will be set to U16_MAX.
- * @no_engine_id: if razwi initiator does not have engine id, this field will be set to 1,
- * otherwise 0.
- * @error_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
- * @pad: padding to 64 bit.
+ * @engine_id: engine id of the razwi initiator. If the razwi was not initiated by an engine
+ * that has an engine id, it will be set to HL_RAZWI_NA_ENG_ID. If there are several
+ * possible engines which caused the razwi, it will hold all of them.
+ * @num_of_possible_engines: number of possible engine ids. In some asics a razwi indication
+ * might be common to several engines and there is no way to get the
+ * exact engine, in which case the engine_id array is filled with all
+ * possible engines that could have caused this razwi. In Gaudi there
+ * might be no indication of a specific engine at all, in which case
+ * the value of this parameter will be zero.
+ * @flags: bitmask for additional data: HL_RAZWI_READ - razwi caused by read operation
+ * HL_RAZWI_WRITE - razwi caused by write operation
+ * HL_RAZWI_LBW - razwi caused by lbw fabric transaction
+ * HL_RAZWI_HBW - razwi caused by hbw fabric transaction
+ * HL_RAZWI_RR - razwi caused by range register
+ * HL_RAZWI_ADDR_DEC - razwi caused by address decode error
+ * Note: this data is not supported by all asics; in that case the relevant bits will not
+ * be set.
*/
struct hl_info_razwi_event {
__s64 timestamp;
__u64 addr;
- __u16 engine_id_1;
- __u16 engine_id_2;
- __u8 no_engine_id;
- __u8 error_type;
- __u8 pad[2];
+ __u16 engine_id[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR];
+ __u16 num_of_possible_engines;
+ __u8 flags;
+ __u8 pad[5];
};
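Editor's note: a short hedged sketch of reading the captured razwi record and decoding the new flags field (same assumptions about the ioctl entry point and device node as above). The uapi header uses BIT(); the fallback define is only for toolchains that do not provide it.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

#ifndef BIT
#define BIT(n) (1U << (n))
#endif

int main(void)
{
	struct hl_info_razwi_event razwi = {0};
	struct hl_info_args args = {0};
	int fd = open("/dev/hl0", O_RDWR);

	args.op = HL_INFO_RAZWI_EVENT;
	args.return_pointer = (uint64_t)(uintptr_t)&razwi;
	args.return_size = sizeof(razwi);
	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return 1;

	printf("razwi at 0x%llx (%s%s), %u possible engine(s)\n",
	       (unsigned long long)razwi.addr,
	       (razwi.flags & HL_RAZWI_READ) ? "read" : "",
	       (razwi.flags & HL_RAZWI_WRITE) ? "write" : "",
	       razwi.num_of_possible_engines);

	close(fd);
	return 0;
}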
#define MAX_QMAN_STREAMS_INFO 4
@@ -1174,6 +1207,29 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
+ /**
+ * struct hl_page_fault_info - page fault information.
+ * @timestamp: timestamp of page fault.
+ * @addr: address which accessing it caused page fault.
+ * @engine_id: engine id which caused the page fault, supported only in gaudi3.
+ */
+ struct hl_page_fault_info {
+ __s64 timestamp;
+ __u64 addr;
+ __u16 engine_id;
+ __u8 pad[6];
+ };
+
+ /**
+ * struct hl_user_mapping - user mapping information.
+ * @dev_va: device virtual address.
+ * @size: virtual address mapping size.
+ */
+ struct hl_user_mapping {
+ __u64 dev_va;
+ __u64 size;
+ };
enum gaudi_dcores {
HL_GAUDI_WS_DCORE,
HL_GAUDI_WN_DCORE,
@@ -1200,6 +1256,8 @@ enum gaudi_dcores {
* needed, hence updating this variable so user will know the exact amount
* of bytes copied by the kernel to the buffer.
* @sec_attest_nonce: Nonce number used for attestation report.
+ * @array_size: Number of array members copied to user buffer.
+ * Relevant for HL_INFO_USER_MAPPINGS info ioctl.
* @pad: Padding to 64 bit.
*/
struct hl_info_args {
@@ -1215,6 +1273,7 @@ struct hl_info_args {
__u32 eventfd;
__u32 user_buffer_actual_size;
__u32 sec_attest_nonce;
+ __u32 array_size;
};
__u32 pad;