Linux 6.4-rc2

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmRhO8weHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGscUH/3T3ofuhSxphqi0F
 kBsfuZ2mFD9G0JrYZmN6jcAv5+NFPe10+oZsayPlVqdY+CbTqLvkBTxQWm3VnjDT
 wJ2spqhQuVb7gNYqKO+AYlNdjpC5ckFJLc0xHMcFioTocob3E4sVPMZ7ff1n9+LX
 0L+V+T3Dwn7XzT9h5RLhPHQKtTEOfuu2uOSUxYeoS1ChSsbh7Ok8RfdNDW+LHVck
 B0QuuOmxf1nsob8AOU26A3ZBdeFxCzr6fa9NblYTu3ul5kzpJs4Fv3LzFCd80mH6
 lq4d4LlMnCdAPIqgZfb1k+V9CgILE8JQH6ajfrMcpveD67T5/2qZIEFoMRCcsuca
 f/vIcfM=
 =os13
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCgAxFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmRm7cATHGJyb29uaWVA
 a2VybmVsLm9yZwAKCRAk1otyXVSH0CMdB/wPtzv9SU+fO61GWdqPaDa4f53qEiM4
 jWLuhPgxON4glIAqm4RJqPwb2FzF28V1UzlmDRD5dQczCSBgWsFpaxOpC9APhxNW
 TRaKmJzNPqpCIvHShQ6UWbMOqvp+GPJRu+tXBl/rZaUYums4u1Sp5GJLxvlHP//B
 /9gfiiqoaqgXrNbLTpzOumBcZLFUO3hNQH5SAVr9Y0BbmNK2okV1ZyWO0AUREvn8
 tUNjDNLzPwWGgZyrmgQHSBiNREevM1WM9/uNbX6PcyZapTPLGeIKclJfNj7sSFpH
 Ua6vZSsqUhBbeFyuZ8nGqPYawuO3AJvlnfdy1xZIgO4eA8772DhMw38+
 =iaNV
 -----END PGP SIGNATURE-----

Merge tag 'v6.4-rc2' into asoc-6.5 to get fixes for CI

Linux 6.4-rc2
Mark Brown 2023-05-19 12:32:04 +09:00
commit af53b00fa3
192 changed files with 2883 additions and 2008 deletions


@ -18,7 +18,6 @@ Block
kyber-iosched
null_blk
pr
request
stat
switching-sched
writeback_cache_control


@ -1,99 +0,0 @@
============================
struct request documentation
============================
Jens Axboe <jens.axboe@oracle.com> 27/05/02
.. FIXME:
No idea about what does mean - seems just some noise, so comment it
1.0
Index
2.0 Struct request members classification
2.1 struct request members explanation
3.0
2.0
Short explanation of request members
====================================
Classification flags:
= ====================
D driver member
B block layer member
I I/O scheduler member
= ====================
Unless an entry contains a D classification, a device driver must not access
this member. Some members may contain D classifications, but should only be
access through certain macros or functions (eg ->flags).
<linux/blkdev.h>
=============================== ======= =======================================
Member Flag Comment
=============================== ======= =======================================
struct list_head queuelist BI Organization on various internal
queues
``void *elevator_private`` I I/O scheduler private data
unsigned char cmd[16] D Driver can use this for setting up
a cdb before execution, see
blk_queue_prep_rq
unsigned long flags DBI Contains info about data direction,
request type, etc.
int rq_status D Request status bits
kdev_t rq_dev DBI Target device
int errors DB Error counts
sector_t sector DBI Target location
unsigned long hard_nr_sectors B Used to keep sector sane
unsigned long nr_sectors DBI Total number of sectors in request
unsigned long hard_nr_sectors B Used to keep nr_sectors sane
unsigned short nr_phys_segments DB Number of physical scatter gather
segments in a request
unsigned short nr_hw_segments DB Number of hardware scatter gather
segments in a request
unsigned int current_nr_sectors DB Number of sectors in first segment
of request
unsigned int hard_cur_sectors B Used to keep current_nr_sectors sane
int tag DB TCQ tag, if assigned
``void *special`` D Free to be used by driver
``char *buffer`` D Map of first segment, also see
section on bouncing SECTION
``struct completion *waiting`` D Can be used by driver to get signalled
on request completion
``struct bio *bio`` DBI First bio in request
``struct bio *biotail`` DBI Last bio in request
``struct request_queue *q`` DB Request queue this request belongs to
``struct request_list *rl`` B Request list this request came from
=============================== ======= =======================================


@ -49,6 +49,7 @@ properties:
properties:
data-lanes:
minItems: 1
maxItems: 2
required:


@ -17,20 +17,11 @@ description:
properties:
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
maxItems: 4
clock-names:
minItems: 3
items:
- const: pcie
- const: pcie_bus
- enum: [ pcie_phy, pcie_aux ]
- enum: [ pcie_inbound_axi, pcie_aux ]
maxItems: 4
num-lanes:
const: 1


@ -31,6 +31,19 @@ properties:
- const: dbi
- const: addr_space
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
clock-names:
minItems: 3
maxItems: 4
interrupts:
items:
- description: builtin eDMA interrupter.
@ -49,6 +62,31 @@ required:
allOf:
- $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
- $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml#
- if:
properties:
compatible:
enum:
- fsl,imx8mq-pcie-ep
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_aux
else:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_aux
unevaluatedProperties: false


@ -40,6 +40,19 @@ properties:
- const: dbi
- const: config
clocks:
minItems: 3
items:
- description: PCIe bridge clock.
- description: PCIe bus clock.
- description: PCIe PHY clock.
- description: Additional required clock entry for imx6sx-pcie,
imx6sx-pcie-ep, imx8mq-pcie, imx8mq-pcie-ep.
clock-names:
minItems: 3
maxItems: 4
interrupts:
items:
- description: builtin MSI controller.
@ -77,6 +90,70 @@ required:
allOf:
- $ref: /schemas/pci/snps,dw-pcie.yaml#
- $ref: /schemas/pci/fsl,imx6q-pcie-common.yaml#
- if:
properties:
compatible:
enum:
- fsl,imx6sx-pcie
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_inbound_axi
- if:
properties:
compatible:
enum:
- fsl,imx8mq-pcie
then:
properties:
clocks:
minItems: 4
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- const: pcie_aux
- if:
properties:
compatible:
enum:
- fsl,imx6q-pcie
- fsl,imx6qp-pcie
- fsl,imx7d-pcie
then:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_phy
- if:
properties:
compatible:
enum:
- fsl,imx8mm-pcie
- fsl,imx8mp-pcie
then:
properties:
clocks:
maxItems: 3
clock-names:
items:
- const: pcie
- const: pcie_bus
- const: pcie_aux
unevaluatedProperties: false


@ -776,10 +776,11 @@ peer_notif_delay
Specify the delay, in milliseconds, between each peer
notification (gratuitous ARP and unsolicited IPv6 Neighbor
Advertisement) when they are issued after a failover event.
This delay should be a multiple of the link monitor interval
(arp_interval or miimon, whichever is active). The default
value is 0 which means to match the value of the link monitor
interval.
This delay should be a multiple of the MII link monitor interval
(miimon).
The valid range is 0 - 300000. The default value is 0, which means
to match the value of the MII link monitor interval.
prio
Slave priority. A higher number means higher priority.
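
As a usage sketch (not part of this patch): the delay is normally configured through the bonding sysfs option, e.g. writing 1000 to /sys/class/net/bond0/bonding/peer_notif_delay for a one-second delay, or via the matching ip-link bond parameter; with this change, values above 300000 ms are rejected.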


@ -116,8 +116,8 @@ Contents:
udplite
vrf
vxlan
x25-iface
x25
x25-iface
xfrm_device
xfrm_proc
xfrm_sync


@ -1,8 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
============================-
X.25 Device Driver Interface
============================-
============================
Version 1.1

File diff suppressed because it is too large


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -308,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
{
unsigned long bytes = 0;
unsigned long insn;
unsigned long result = 0;
/*
* unwind_get_byte() will advance `ctrl` one instruction at a time, so
* loop until we get an instruction byte where bit 7 is not set.
*
* Note: This decodes a maximum of 4 bytes to output 28 bits data where
* max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
* it is sufficient for unwinding the stack.
*/
do {
insn = unwind_get_byte(ctrl);
result |= (insn & 0x7f) << (bytes * 7);
bytes++;
} while (!!(insn & 0x80) && (bytes != sizeof(result)));
return result;
}
/*
* Execute the current unwind instruction.
*/
@ -361,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
unsigned long uleb128 = unwind_get_byte(ctrl);
unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
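
For intuition only (not part of this patch): ULEB128 stores 7 payload bits per byte, least-significant group first, with bit 7 as a continuation flag. A minimal standalone C sketch of the same decoding, using hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* Assumes buf holds a complete, valid ULEB128 encoding. */
static unsigned long uleb128_decode(const uint8_t *buf)
{
        unsigned long result = 0;
        unsigned int shift = 0;
        uint8_t byte;

        do {
                byte = *buf++;
                result |= (unsigned long)(byte & 0x7f) << shift;
                shift += 7;
        } while (byte & 0x80);

        return result;
}

int main(void)
{
        const uint8_t enc[] = { 0x81, 0x01 };   /* 0x01 + (0x01 << 7) = 129 */

        printf("%lu\n", uleb128_decode(enc));   /* prints 129 */
        return 0;
}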


@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
* @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
* @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
/**
* jornada_ssp_inout - decide if input is command or trading byte
* @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure


@ -23,6 +23,9 @@
@
ENTRY(do_vfp)
mov r1, r10
mov r3, r9
b vfp_entry
str lr, [sp, #-8]!
add r3, sp, #4
str r9, [r3]
bl vfp_entry
ldr pc, [sp], #8
ENDPROC(do_vfp)


@ -172,13 +172,14 @@ vfp_hw_state_valid:
@ out before setting an FPEXC that
@ stops us reading stuff
VFPFMXR FPEXC, r1 @ Restore FPEXC last
mov sp, r3 @ we think we have handled things
pop {lr}
sub r2, r2, #4 @ Retry current instruction - if Thumb
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
mov lr, r3 @ we think we have handled things
local_bh_enable_and_ret:
adr r0, .
mov r1, #SOFTIRQ_DISABLE_OFFSET
@ -209,8 +210,9 @@ skip:
process_exception:
DBGSTR "bounce"
mov sp, r3 @ setup for a return to the user code.
pop {lr}
mov r2, sp @ nothing stacked - regdump is at TOS
mov lr, r3 @ setup for a return to the user code.
@ Now call the C code to package up the bounce to the support code
@ r0 holds the trigger instruction


@ -413,12 +413,12 @@ extern void paging_init (void);
* For the 64bit version, the offset is extended by 32bit.
*/
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
(((x).val >> 8) & ~0x7) )
#define __swp_offset(x) ( (((x).val >> 5) & 0x7) | \
(((x).val >> 10) << 3) )
#define __swp_entry(type, offset) ((swp_entry_t) { \
((type) & 0x1f) | \
((offset & 0x7) << 6) | \
((offset & ~0x7) << 8) })
((offset & 0x7) << 5) | \
((offset >> 3) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
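
To sanity-check the new layout (a standalone sketch, not kernel code): the type lives in bits 0-4, the low three offset bits in bits 5-7, and the remaining offset bits start at bit 10. A round-trip with hypothetical values type = 3, offset = 9:

#include <assert.h>
#include <stdio.h>

static unsigned long mk_entry(unsigned long type, unsigned long offset)
{
        return (type & 0x1f) | ((offset & 0x7) << 5) | ((offset >> 3) << 10);
}

static unsigned long entry_type(unsigned long val)
{
        return val & 0x1f;
}

static unsigned long entry_offset(unsigned long val)
{
        return ((val >> 5) & 0x7) | ((val >> 10) << 3);
}

int main(void)
{
        unsigned long val = mk_entry(3, 9);     /* 0x3 | 0x20 | 0x400 = 0x423 */

        assert(entry_type(val) == 3);
        assert(entry_offset(val) == 9);
        printf("entry=%#lx\n", val);
        return 0;
}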


@ -4,6 +4,8 @@
#include <linux/console.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>


@ -22,7 +22,7 @@ KCOV_INSTRUMENT := n
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \
--prefix-alloc-sections=.init
--prefix-alloc-sections=.init.pi
$(obj)/%.pi.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)


@ -84,11 +84,8 @@ SECTIONS
__init_data_begin = .;
INIT_DATA_SECTION(16)
/* Those sections result from the compilation of kernel/pi/string.c */
.init.pidata : {
*(.init.srodata.cst8*)
*(.init__bug_table*)
*(.init.sdata*)
.init.pi : {
*(.init.pi*)
}
.init.bss : {


@ -1703,10 +1703,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (has_branch_stack(event))
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);


@ -1229,12 +1229,14 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
struct perf_event *event, bool add)
{
struct pmu *pmu = event->pmu;
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
* that does not hurt:
*/
bool update = cpuc->n_pebs == 1;
if (cpuc->n_pebs == 1)
cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
if (needed_cb != pebs_needs_sched_cb(cpuc)) {
if (!needed_cb)
@ -1242,7 +1244,7 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
else
perf_sched_cb_dec(pmu);
update = true;
cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
}
/*
@ -1252,24 +1254,13 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
if (x86_pmu.intel_cap.pebs_baseline && add) {
u64 pebs_data_cfg;
/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
if (cpuc->n_pebs == 1) {
cpuc->pebs_data_cfg = 0;
cpuc->pebs_record_size = sizeof(struct pebs_basic);
}
pebs_data_cfg = pebs_update_adaptive_cfg(event);
/* Update pebs_record_size if new event requires more data. */
if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
cpuc->pebs_data_cfg |= pebs_data_cfg;
adaptive_pebs_record_size_update();
update = true;
}
/*
* Be sure to update the thresholds when we change the record.
*/
if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
}
if (update)
pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
@ -1326,9 +1317,17 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
wrmsrl(base + idx, value);
}
static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
{
if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
unsigned int idx = hwc->idx;
@ -1344,11 +1343,22 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (x86_pmu.intel_cap.pebs_baseline) {
hwc->config |= ICL_EVENTSEL_ADAPTIVE;
if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
/*
* drain_pebs() assumes uniform record size;
* hence we need to drain when changing said
* size.
*/
intel_pmu_drain_large_pebs(cpuc);
adaptive_pebs_record_size_update();
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;
}
}
if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
cpuc->pebs_data_cfg = pebs_data_cfg;
pebs_update_threshold(cpuc);
}
if (idx >= INTEL_PMC_IDX_FIXED) {
if (x86_pmu.intel_cap.pebs_format < 5)
@ -1391,9 +1401,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (cpuc->n_pebs == cpuc->n_large_pebs &&
cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
intel_pmu_drain_large_pebs(cpuc);
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);


@ -121,6 +121,9 @@
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:


@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{}
};


@ -144,8 +144,8 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
*/
.align 64
.skip 63, 0xcc
SYM_FUNC_START_NOALIGN(zen_untrain_ret);
SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
ANNOTATE_NOENDBR
/*
* As executed from zen_untrain_ret, this is:
*


@ -1666,7 +1666,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
if (!dir) {
if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
@ -1692,7 +1692,7 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
if (!dbg_dir)
if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;


@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
bio_opf = REQ_OP_WRITE;
break;
case RNBD_OP_FLUSH:
bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
break;
case RNBD_OP_DISCARD:
bio_opf = REQ_OP_DISCARD;


@ -1281,7 +1281,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
{
u32 ioc_type = _IOC_TYPE(cmd_op);
if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
return -EOPNOTSUPP;
if (ioc_type != 'u' && ioc_type != 0)


@ -571,6 +571,7 @@ void read_cdat_data(struct cxl_port *port)
/* Don't leave table data allocated on error */
devm_kfree(dev, cdat_table);
dev_err(dev, "CDAT data read error\n");
return;
}
port->cdat.table = cdat_table + sizeof(__le32);


@ -706,21 +706,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int rcode;
if (destination == IEEE1394_ALL_NODES) {
kfree(r);
return;
}
if (offset != dev->handler.offset)
// Although the response to the broadcast packet is not necessarily required, the
// fw_send_response() function should still be called to maintain the reference
// counting of the object. In the case, the call of function just releases the
// object as a result to decrease the reference counting.
rcode = RCODE_COMPLETE;
} else if (offset != dev->handler.offset) {
rcode = RCODE_ADDRESS_ERROR;
else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
} else if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
rcode = RCODE_TYPE_ERROR;
else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
} else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
} else
} else {
rcode = RCODE_COMPLETE;
}
fw_send_response(card, r, rcode);
}


@ -51,7 +51,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
*
* It's not easily possible to fix this in struct screen_info,
* as this could break UAPI. The best solution is to compute
* bits_per_pixel here and ignore lfb_depth. In the loop below,
* bits_per_pixel from the color bits, reserved bits and
* reported lfb_depth, whichever is highest. In the loop below,
* ignore simplefb formats with alpha bits, as EFI and VESA
* don't specify alpha channels.
*/
@ -60,6 +61,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
si->green_size + si->green_pos,
si->blue_size + si->blue_pos),
si->rsvd_size + si->rsvd_pos);
bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
} else {
bits_per_pixel = si->lfb_depth;
}
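
Illustrative arithmetic only (hypothetical VESA mode, not taken from the patch): if the firmware reports red/green/blue as 8 bits at positions 16/8/0, no reserved bits, and lfb_depth = 32, the channel-derived value is max(24, 16, 8, 0) = 24, and the added max_t() against lfb_depth correctly yields 32 bits per pixel.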


@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
/* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a
* internal path natively support atomics, set have_atomics_support to true.
*/
else if ((adev->flags & AMD_IS_APU) &&
(adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
adev->have_atomics_support = true;
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
shadow = &vmbo->bo;
/* If vm is compute context or adev is APU, shadow will be NULL */
if (!vmbo->shadow)
continue;
shadow = vmbo->shadow;
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||


@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
if (adev->gfx.cp_ecc_error_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
}
} else {
amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}


@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
/* ECC error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* FED error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@ -5897,36 +5889,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
}
}
#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
do { \
uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
WREG32_SOC15_IP(GC, reg_addr, tmp); \
} while (0)
static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t ecc_irq_state = 0;
uint32_t pipe0_int_cntl_addr = 0;
int i = 0;
ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);
for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
ecc_irq_state);
return 0;
}
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@ -6341,11 +6303,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
.set = gfx_v11_0_set_cp_ecc_error_state,
.process = amdgpu_gfx_cp_ecc_error_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
.process = gfx_v11_0_rlc_gc_fed_irq,
};
@ -6361,9 +6318,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;
adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;


@ -3764,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);


@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);


@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
.codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
.codec_array = sc_video_codecs_encode_array,
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
} else {
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn1;
} else {
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
}
@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
*codecs = &sc_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;


@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
}
}
sdma_v4_0_ctx_switch_enable(adev, false);


@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
adev->external_rev_id = adev->rev_id + 0x80;
break;
default:


@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
PERF_TRACE();
}
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
/* There are use cases where SYMCLK is referenced by OTG. For instance
* for TMDS signal, OTG relies SYMCLK even if TX video output is off.
* However current link interface will power off PHY when disabling link
* output. This will turn off SYMCLK generated by PHY. The workaround is
* to identify such case where SYMCLK is still in use by OTG when we
* power off PHY. When this is detected, we will temporarily power PHY
* back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
* program_pix_clk interface. When OTG is disabled, we will then power
* off PHY by calling disable link output again.
*
* In future dcn generations, we plan to rework transmitter control
* interface so that we could have an option to set SYMCLK ON TX OFF
* state in one step without this workaround
*/
struct dc *dc = link->ctx->dc;
struct pipe_ctx *pipe_ctx = NULL;
uint8_t i;
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
}
}
}
}
void dcn314_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
struct dmcu *dmcu = dc->res_pool->dmcu;
if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
/*
* Add the logic to extract BOTH power up and power down sequences
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}


@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
#endif /* __DC_HWSS_DCN314_H__ */


@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.disable_link_output = dcn314_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,


@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */


@ -53,6 +53,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
#define MIN_DCFCLK_FREQ_MHZ 200
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;


@ -36,6 +36,8 @@
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) {
struct smu_context *smu = adev->powerplay.pp_handle;
if ((is_support_sw_smu(adev) && smu->od_enabled) ||
(is_support_sw_smu(adev) && smu->is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
return true;
return (smu->od_enabled || smu->is_apu);
} else {
struct pp_hwmgr *hwmgr;
return false;
/*
* dpm on some legacy asics don't carry od_enabled member
* as its pp_handle is casted directly from adev.
*/
if (amdgpu_dpm_is_legacy_dpm(adev))
return false;
hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
return hwmgr->od_enabled;
}
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,


@ -425,11 +425,12 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
/*
* If we don't have IO space at all, use MMIO now and
* assume the chip has MMIO enabled by default (rev 0x20
* and higher).
* After AST2500, MMIO is enabled by default, and it should be adopted
* to be compatible with Arm.
*/
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
if (pdev->revision >= 0x40) {
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
} else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}


@ -641,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
u32 line_length = info->fix.line_length;
u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
u32 y1 = off / info->fix.line_length;
u32 y1 = off / line_length;
u32 x2 = info->var.xres;
u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
u32 y2 = DIV_ROUND_UP(end, line_length);
/* Don't allow any of them beyond the bottom bound of display area */
if (y1 > fb_height)
y1 = fb_height;
if (y2 > fb_height)
y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
off_t bit_off = (off % info->fix.line_length) * 8;
off_t bit_end = (end % info->fix.line_length) * 8;
off_t bit_off = (off % line_length) * 8;
off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
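
A worked example with hypothetical numbers (not from the patch): for a 1024x768 XRGB8888 framebuffer (line_length = 4096, 32 bits per pixel), a 100-byte write at offset 8192 gives y1 = 8192 / 4096 = 2 and y2 = DIV_ROUND_UP(8292, 4096) = 3; only one scanline is touched, so bit_off = 0 and bit_end = 800, narrowing the clip to x1 = 0 and x2 = DIV_ROUND_UP(800, 32) = 25, i.e. pixels 0-24 of line 2 instead of the full width.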


@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
dsi->dev.of_node = info->node;
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));


@ -62,10 +62,11 @@ config DRM_I915_FORCE_PROBE
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
Force probe the i915 for Intel graphics devices that are
recognized but not properly supported by this kernel version. It is
recommended to upgrade to a kernel version with proper support as soon
as it is available.
Force probe the i915 driver for Intel graphics devices that are
recognized but not properly supported by this kernel version. Force
probing an unsupported device taints the kernel. It is recommended to
upgrade to a kernel version with proper support as soon as it is
available.
It can also be used to block the probe of recognized and fully
supported devices.
@ -75,7 +76,8 @@ config DRM_I915_FORCE_PROBE
Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
devices. For example, "4500" or "4500,4571".
Use "*" to force probe the driver for all known devices.
Use "*" to force probe the driver for all known devices. Not
recommended.
Use "!" right before the ID to block the probe of the device. For
example, "4500,!4571" forces the probe of 4500 and blocks the probe of


@ -1028,7 +1028,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
int ret;
if (old_obj) {
const struct intel_crtc_state *crtc_state =
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state,
to_intel_crtc(old_plane_state->hw.crtc));
@ -1043,7 +1043,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
if (intel_crtc_needs_modeset(crtc_state)) {
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv,
false, 0,


@ -1601,6 +1601,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
if (!pipe_config->dsc.slice_count) {
drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
pipe_config->dsc.slice_count);
return -EINVAL;
}
} else {
u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;


@ -31,12 +31,14 @@
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
#define COMMON_GEN9BASE_GLOBAL \
{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
#define GEN9_GLOBAL \
{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
@ -142,6 +144,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
static const struct __guc_mmio_reg_descr default_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN9BASE_GLOBAL,
GEN9_GLOBAL,
};
static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {


@ -1344,6 +1344,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
if (intel_info->require_force_probe) {
dev_info(&pdev->dev, "Force probing unsupported Device ID %04x, tainting kernel\n",
pdev->device);
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
}
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both


@ -2,6 +2,8 @@
#ifndef __NVIF_IF0012_H__
#define __NVIF_IF0012_H__
#include <drm/display/drm_dp.h>
union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
@ -63,7 +65,7 @@ union nvif_outp_acquire_args {
__u8 hda;
__u8 mst;
__u8 pad04[4];
__u8 dpcd[16];
__u8 dpcd[DP_RECEIVER_CAP_SIZE];
} dp;
};
} v0;


@ -3,6 +3,7 @@
#define __NVKM_DISP_OUTP_H__
#include "priv.h"
#include <drm/display/drm_dp.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
@ -42,7 +43,7 @@ struct nvkm_outp {
bool aux_pwr_pu;
u8 lttpr[6];
u8 lttprs;
u8 dpcd[16];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
struct {
int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */


@ -146,7 +146,7 @@ nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[16],
nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 link_nr, u8 link_bw, bool hda, bool mst)
{
int ret;


@ -309,7 +309,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
*/
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
if (sched->ready)
if (sched->timeout_wq)
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);


@ -507,6 +507,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};


@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {


@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
.pm = &mxc_isi_pm_ops,
.pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);


@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
#endif
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}


@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
/* Gen3 can't scale NV12 */
if (vin->info->model == RCAR_GEN3 &&
vin->format.pixelformat == V4L2_PIX_FMT_NV12)
return -EPIPE;
if (!vin->scaler)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
if (ALIGN(fmt.format.width, 32) != vin->format.width ||
ALIGN(fmt.format.height, 32) != vin->format.height)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
}
}
if (fmt.format.code != vin->mbus_code)


@ -84,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
/* Limit the max delay range to 300s */
static struct netlink_range_validation delay_range = {
.max = 300000,
};
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};


@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
{ "off", 0, 0},
{ "maxval", 300000, BOND_VALFLAG_MAX},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_intmax_tbl,
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};


@ -294,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
/* Clear PCI MSI-X Pending Bit Array (PBA)
*
* This bit is set if an interrupt event occurs while the vector is
* masked. If this bit is set and we reenable the interrupt, it will
* fire again. Since we're just about to poll the queue state, we don't
* need it to fire again.
*
* Under high softirq load, it's possible that the interrupt condition
* is triggered twice before we got the chance to process it.
*/
gve_write_irq_doorbell_dqo(priv, block,
GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);


@ -654,7 +654,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
BIT(hw->index), BIT(hw->index));
}
if (!hw_list[!hw->index]->wed_dev &&
if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);


@ -307,15 +307,15 @@ static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),


@ -181,6 +181,7 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */


@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
u32 clk_rate;
value |= GMAC_CORE_INIT;
@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
/* Enable GMAC interrupts */
value = GMAC_INT_DEFAULT_ENABLE;


@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;


@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
return of_mdiobus_register(mdio, dev->of_node);
ret = of_mdiobus_register(mdio, dev->of_node);
if (ret)
goto put_dev;
return 0;
put_dev:
usb_put_dev(mvusb->udev);
return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)


@ -1203,7 +1203,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_features),
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
.an_mode = DW_2500BASEX,
},
};


@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}
static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
{
return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
}
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);


@ -486,7 +486,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
/* Read CORE_EXPA9 */
tmp = bcm_phy_read_exp(phydev, 0x00a9);
tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
/* CORE_EXPA9[6:1] is rcalcode[5:0] */
rcalcode = (tmp & 0x7e) / 2;
/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */


@ -742,7 +742,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
/* copy skb_ubuf_info for callback when skb has no error */
@ -1197,7 +1197,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();


@ -784,7 +784,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
fifo = vring->fifo;
/* Return if vdev is not ready. */
if (!fifo->vdev[devid])
if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
@ -980,9 +980,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
vq->num_max = vring->num;
vq->priv = vring;
/* Make vq update visible before using it. */
virtio_mb(false);
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
}
return 0;
@ -1302,6 +1306,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
/* Make all updates visible before setting the 'is_ready' flag. */
virtio_mb(false);
fifo->is_ready = true;
return 0;


@ -211,6 +211,7 @@ struct bios_rfkill2_state {
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },


@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
int min_max)
{
unsigned int input;
int ret;
if (kstrtouint(buf, 10, &input))
return -EINVAL;
mutex_lock(&uncore_lock);
uncore_write(data, input, min_max);
ret = uncore_write(data, input, min_max);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
return count;
}


@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
{ PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },


@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (err)
return err;
/* Check if user wants to override the profile selection */
if (profile_force) {
switch (profile_force) {
case -1:
dytc_capabilities = 0;
break;
case 1:
dytc_capabilities = BIT(DYTC_FC_MMC);
break;
case 2:
dytc_capabilities = BIT(DYTC_FC_PSC);
break;
}
pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
}
if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
pr_debug("MMC is supported\n");
/*
@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
dytc_mmc_get_available = true;
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
/* Support for this only works on AMD platforms */
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
return -ENODEV;
}
pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
"Initial state of the emulated UWB switch");
#endif
module_param(profile_force, int, 0444);
MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
static void thinkpad_acpi_module_exit(void)
{
struct ibm_struct *ibm, *itmp;
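
Usage note (not part of this patch): the parameter is registered with mode 0444, so it is read-only at runtime and would normally be set at module load, e.g. thinkpad_acpi.profile_force=2 on the kernel command line or profile_force=2 as a modprobe option, to force PSC mode where autodetection misbehaves.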


@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data dexp_ursus_kx210i_data = {
.acpi_name = "MSSL1680:00",
.properties = dexp_ursus_kx210i_props,
};
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
static const struct ts_dmi_data gdix1002_00_upside_down_data = {
.acpi_name = "GDIX1002:00",
.properties = gdix1001_upside_down_props,
};
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@ -1185,6 +1206,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
{
/* DEXP Ursus KX210i */
.driver_data = (void *)&dexp_ursus_kx210i_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
},
},
{
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
@ -1295,6 +1324,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
{
/* Juno Tablet */
.driver_data = (void *)&gdix1002_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
/* Above matches are too generic, add partial bios-version match */
DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
},
},
{
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
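
The Juno Tablet entry shows the usual approach for boards that only report "Default string" identifiers: keep the generic vendor/product/board matches but add a partial BIOS-version match to narrow them. DMI_MATCH entries act as substring matches, so a prefix such as "JP2V1." is enough. A tiny standalone approximation of that matching follows; the struct and sample strings are hypothetical.

#include <stdio.h>
#include <string.h>

/* Hypothetical board identity; the kernel reads these from DMI/SMBIOS. */
struct board {
        const char *sys_vendor, *product, *board_name, *bios_version;
};

/* DMI_MATCH-style check: every field must contain the given substring,
 * so the partial BIOS version narrows otherwise generic entries. */
static int matches(const struct board *b)
{
        return strstr(b->sys_vendor, "Default string") &&
               strstr(b->product, "Default string") &&
               strstr(b->board_name, "Default string") &&
               strstr(b->bios_version, "JP2V1.");
}

int main(void)
{
        struct board juno = {
                "Default string", "Default string", "Default string", "JP2V1.003"
        };
        struct board other = {
                "Default string", "Default string", "Default string", "5.11"
        };

        printf("juno matches:  %d\n", matches(&juno));
        printf("other matches: %d\n", matches(&other));
        return 0;
}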


@ -9459,8 +9459,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* that performance might be impacted.
*/
ret = ufshcd_urgent_bkops(hba);
if (ret)
if (ret) {
/*
* If we return an error here in the suspend flow, I/O will hang.
* Trigger the error handler and break out of suspend so that
* error recovery can run.
*/
ufshcd_force_error_recovery(hba);
ret = -EBUSY;
goto enable_scaling;
}
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
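
The hunk above turns a BKOPS failure during W-LUN suspend from a passed-through error, which would leave outstanding I/O hanging, into a forced error-recovery run plus -EBUSY so the suspend attempt is aborted cleanly. A compact sketch of that conversion follows; the helpers are stand-ins, not the UFS core's functions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool bkops_fails;           /* toggle to simulate the failure */

static int urgent_bkops(void)
{
        return bkops_fails ? -EIO : 0;
}

static void force_error_recovery(void)
{
        printf("scheduling host error handler\n");
}

/* Suspend step: on failure, do not pass the raw error up. Kick error
 * recovery and report -EBUSY so suspend is aborted and retried later. */
static int wl_suspend_step(void)
{
        int ret = urgent_bkops();

        if (ret) {
                force_error_recovery();
                return -EBUSY;
        }
        return 0;
}

int main(void)
{
        bkops_fails = true;
        printf("suspend -> %d\n", wl_suspend_step());
        return 0;
}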


@ -124,7 +124,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
* data from it to check this var.
*/
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
@ -182,7 +182,7 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
/*
* Now that we checked it we alter var. The reason being is that the video
* mode passed in might not work but slight changes to it might make it
* mode passed in might not work but slight changes to it might make it
* work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
@ -257,8 +257,8 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
}
/* This routine actually sets the video mode. It's in here where we
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
*/
static int mc68x328fb_set_par(struct fb_info *info)
{
@ -295,7 +295,7 @@ static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* {hardwarespecific} contains width of RAMDAC
* cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
*
*
* Pseudocolor:
* uses offset = 0 && length = RAMDAC register width.
* var->{color}.offset is 0
@ -384,7 +384,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
}
/*
* Most drivers don't need their own mmap function
* Most drivers don't need their own mmap function
*/
static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)


@ -523,7 +523,7 @@ static int arcfb_probe(struct platform_device *dev)
info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
if (!info)
goto err;
goto err_fb_alloc;
info->screen_base = (char __iomem *)videomemory;
info->fbops = &arcfb_ops;
@ -535,7 +535,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!dio_addr || !cio_addr || !c2io_addr) {
printk(KERN_WARNING "no IO addresses supplied\n");
goto err1;
goto err_addr;
}
par->dio_addr = dio_addr;
par->cio_addr = cio_addr;
@ -551,12 +551,12 @@ static int arcfb_probe(struct platform_device *dev)
printk(KERN_INFO
"arcfb: Failed req IRQ %d\n", par->irq);
retval = -EBUSY;
goto err1;
goto err_addr;
}
}
retval = register_framebuffer(info);
if (retval < 0)
goto err1;
goto err_register_fb;
platform_set_drvdata(dev, info);
fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
videomemorysize >> 10);
@ -580,9 +580,12 @@ static int arcfb_probe(struct platform_device *dev)
}
return 0;
err1:
err_register_fb:
free_irq(par->irq, info);
err_addr:
framebuffer_release(info);
err:
err_fb_alloc:
vfree(videomemory);
return retval;
}
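
The arcfb changes rename the numbered error labels to descriptive ones (err_fb_alloc, err_addr, err_register_fb) so each jump target states what it unwinds. A reduced, self-contained sketch of that unwind-ladder style follows; the resources are plain allocations, not the driver's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int register_device(void *info) { (void)info; return 0; }  /* stub */

/* Each label frees exactly what was acquired before the failing step,
 * in reverse order, and its name says which step it backs out. */
static int probe(void)
{
        void *videomem, *info;
        int retval;

        videomem = malloc(4096);
        if (!videomem)
                return -ENOMEM;

        info = malloc(64);
        if (!info) {
                retval = -ENOMEM;
                goto err_fb_alloc;
        }

        retval = register_device(info);
        if (retval < 0)
                goto err_register_fb;

        printf("probe ok\n");
        return 0;

err_register_fb:
        free(info);
err_fb_alloc:
        free(videomem);
        return retval;
}

int main(void)
{
        return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}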


@ -317,7 +317,7 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
/**
* atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory
* @sinfo: the frame buffer to allocate memory for
*
*
* This function is called only from the atmel_lcdfb_probe()
* so no locking by fb_info->mm_lock around smem_len setting is needed.
*/


@ -512,7 +512,7 @@ static int cg14_probe(struct platform_device *op)
is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map));
for (i = 0; i < CG14_MMAP_ENTRIES; i++) {
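
The memcpy above is guarded by a BUILD_BUG_ON() so a size mismatch between the driver's copy of the mmap map and the template becomes a compile-time error rather than a silent overrun. Outside the kernel the same check can be written with C11 _Static_assert; a minimal sketch with made-up array names:

#include <stdio.h>
#include <string.h>

static const int defaults[4] = { 1, 2, 3, 4 };

struct par {
        int mmap_map[4];
};

/* Fails the build, not the runtime, if the two arrays ever diverge. */
_Static_assert(sizeof(((struct par *)0)->mmap_map) == sizeof(defaults),
               "mmap_map and defaults must have the same size");

int main(void)
{
        struct par p;

        memcpy(p.mmap_map, defaults, sizeof(p.mmap_map));
        printf("%d %d %d %d\n", p.mmap_map[0], p.mmap_map[1],
               p.mmap_map[2], p.mmap_map[3]);
        return 0;
}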


@ -113,14 +113,14 @@ struct fb_info_control {
struct fb_info info;
struct fb_par_control par;
u32 pseudo_palette[16];
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
struct control_regs __iomem *control_regs;
unsigned long control_regs_phys;
unsigned long control_regs_size;
__u8 __iomem *frame_buffer;
unsigned long frame_buffer_phys;
unsigned long fb_orig_base;
@ -196,7 +196,7 @@ static void set_control_clock(unsigned char *params)
while (!req.complete)
cuda_poll();
}
#endif
#endif
}
/*
@ -233,19 +233,19 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
if (p->par.xoffset != par->xoffset ||
p->par.yoffset != par->yoffset)
set_screen_start(par->xoffset, par->yoffset, p);
return;
}
p->par = *par;
cmode = p->par.cmode;
r = &par->regvals;
/* Turn off display */
out_le32(CNTRL_REG(p,ctrl), 0x400 | par->ctrl);
set_control_clock(r->clock_params);
RADACAL_WRITE(0x20, r->radacal_ctrl);
RADACAL_WRITE(0x21, p->control_use_bank2 ? 0 : 1);
RADACAL_WRITE(0x10, 0);
@ -254,7 +254,7 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
rp = &p->control_regs->vswin;
for (i = 0; i < 16; ++i, ++rp)
out_le32(&rp->r, r->regs[i]);
out_le32(CNTRL_REG(p,pitch), par->pitch);
out_le32(CNTRL_REG(p,mode), r->mode);
out_le32(CNTRL_REG(p,vram_attr), p->vram_attr);
@ -366,7 +366,7 @@ static int read_control_sense(struct fb_info_control *p)
sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0x180) >> 7;
out_le32(CNTRL_REG(p,mon_sense), 077); /* turn off drivers */
return sense;
}
@ -558,9 +558,9 @@ static int control_var_to_par(struct fb_var_screeninfo *var,
static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeninfo *var)
{
struct control_regints *rv;
rv = (struct control_regints *) par->regvals.regs;
memset(var, 0, sizeof(*var));
var->xres = par->xres;
var->yres = par->yres;
@ -568,7 +568,7 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
var->yres_virtual = par->vyres;
var->xoffset = par->xoffset;
var->yoffset = par->yoffset;
switch(par->cmode) {
default:
case CMODE_8:
@ -634,7 +634,7 @@ static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *i
err = control_var_to_par(var, &par, info);
if (err)
return err;
return err;
control_par_to_var(&par, var);
return 0;
@ -655,7 +655,7 @@ static int controlfb_set_par (struct fb_info *info)
" control_var_to_par: %d.\n", err);
return err;
}
control_set_hardware(p, &par);
info->fix.visual = (p->par.cmode == CMODE_8) ?
@ -840,7 +840,7 @@ static int __init init_control(struct fb_info_control *p)
int full, sense, vmode, cmode, vyres;
struct fb_var_screeninfo var;
int rc;
printk(KERN_INFO "controlfb: ");
full = p->total_vram == 0x400000;


@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = {
{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
FB_VMODE_DOUBLE },
/* 1920x1080 @ 60 Hz, 67.3 kHz hsync */
{ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
FB_VMODE_NONINTERLACED },
/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
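
As a sanity check on entries like the new 1920x1080 row, the horizontal sync and refresh rates follow directly from the pixclock (given in picoseconds) and the margin/sync fields of the mode entry. A small standalone calculation using the values from that row, with the field order assumed to follow struct fb_videomode:

#include <stdio.h>

int main(void)
{
        /* { refresh, xres, yres, pixclock(ps), left, right, upper, lower,
         *   hsync_len, vsync_len } from the new 1920x1080 entry. */
        unsigned int xres = 1920, yres = 1080, pixclock_ps = 6734;
        unsigned int left = 148, right = 88, upper = 36, lower = 4;
        unsigned int hsync_len = 44, vsync_len = 5;

        double dotclock_hz = 1e12 / pixclock_ps;                 /* ~148.5 MHz */
        unsigned int htotal = xres + left + right + hsync_len;   /* 2200 */
        unsigned int vtotal = yres + upper + lower + vsync_len;  /* 1125 */
        double hsync_khz = dotclock_hz / htotal / 1e3;
        double refresh_hz = dotclock_hz / htotal / vtotal;

        printf("dotclock %.1f MHz, hsync %.1f kHz, refresh %.1f Hz\n",
               dotclock_hz / 1e6, hsync_khz, refresh_hz);
        return 0;
}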


@ -6,7 +6,7 @@
*
* This driver is based on tgafb.c
*
* Copyright (C) 1997 Geert Uytterhoeven
* Copyright (C) 1997 Geert Uytterhoeven
* Copyright (C) 1995 Jay Estabrook
*
* This file is subject to the terms and conditions of the GNU General Public
@ -28,7 +28,7 @@
#include <asm/io.h>
#include <asm/jazz.h>
/*
/*
* Various defines for the G364
*/
#define G364_MEM_BASE 0xe4400000
@ -125,7 +125,7 @@ static const struct fb_ops g364fb_ops = {
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int g364fb_pan_display(struct fb_var_screeninfo *var,
static int g364fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->xoffset ||


@ -1,6 +1,6 @@
/*
* linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device
*
*
* Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
* Based on skeletonfb.c by Geert Uytterhoeven and
* mdacon.c by Andrew Apted
@ -8,14 +8,14 @@
* History:
*
* - Revision 0.1.8 (23 Oct 2002): Ported to new framebuffer api.
*
* - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
*
* - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
* being detected as Hercules. (Paul G.)
* - Revision 0.1.6 (17 Aug 2000): new style structs
* documentation
* - Revision 0.1.5 (13 Mar 2000): spinlocks instead of saveflags();cli();etc
* minor fixes
* - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* HGA-only systems
* - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure
* screen is cleared after rmmod
@ -143,7 +143,7 @@ static bool nologo = 0;
static void write_hga_b(unsigned int val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb_p(reg, HGA_INDEX_PORT);
outb_p(val, HGA_VALUE_PORT);
}
@ -155,7 +155,7 @@ static void write_hga_w(unsigned int val, unsigned char reg)
static int test_hga_b(unsigned char val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb_p(reg, HGA_INDEX_PORT);
outb (val, HGA_VALUE_PORT);
udelay(20); val = (inb_p(HGA_VALUE_PORT) == val);
return val;
@ -244,7 +244,7 @@ static void hga_show_logo(struct fb_info *info)
void __iomem *dest = hga_vram;
char *logo = linux_logo_bw;
int x, y;
for (y = 134; y < 134 + 80 ; y++)	/* this needs some cleanup */
for (x = 0; x < 10 ; x++)
writeb(~*(logo++),(dest + HGA_ROWADDR(y) + x + 40));
@ -255,7 +255,7 @@ static void hga_pan(unsigned int xoffset, unsigned int yoffset)
{
unsigned int base;
unsigned long flags;
base = (yoffset / 8) * 90 + xoffset;
spin_lock_irqsave(&hga_reg_lock, flags);
write_hga_w(base, 0x0c); /* start address */
@ -310,7 +310,7 @@ static int hga_card_detect(void)
/* Ok, there is definitely a card registering at the correct
* memory location, so now we do an I/O port test.
*/
if (!test_hga_b(0x66, 0x0f)) /* cursor low register */
goto error;
@ -321,7 +321,7 @@ static int hga_card_detect(void)
* bit of the status register is changing. This test lasts for
* approximately 1/10th of a second.
*/
p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
for (count=0; count < 50000 && p_save == q_save; count++) {
@ -329,7 +329,7 @@ static int hga_card_detect(void)
udelay(2);
}
if (p_save == q_save)
if (p_save == q_save)
goto error;
switch (inb_p(HGA_STATUS_PORT) & 0x70) {
@ -415,7 +415,7 @@ static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* @info:pointer to fb_info object containing info for current hga board
*
* This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP
* flag in @var. If input parameters are correct it calls hga_pan() to
* flag in @var. If input parameters are correct it calls hga_pan() to
* program the hardware. @info->var is updated to the new values.
* A zero is returned on success and %-EINVAL for failure.
*/
@ -442,9 +442,9 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
* hgafb_blank - (un)blank the screen
* @blank_mode:blanking method to use
* @info:unused
*
* Blank the screen if blank_mode != 0, else unblank.
* Implements VESA suspend and powerdown modes on hardware that supports
*
* Blank the screen if blank_mode != 0, else unblank.
* Implements VESA suspend and powerdown modes on hardware that supports
* disabling hsync/vsync:
* @blank_mode == 2 means suspend vsync,
* @blank_mode == 3 means suspend hsync,
@ -539,15 +539,15 @@ static const struct fb_ops hgafb_ops = {
.fb_copyarea = hgafb_copyarea,
.fb_imageblit = hgafb_imageblit,
};
/* ------------------------------------------------------------------------- *
*
* Functions in fb_info
*
*
* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/*
* Initialization
*/


@ -92,7 +92,7 @@ static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= info->cmap.len)
return 1;
while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1);
out_be16(fb_regs + 0x60ba, 0xff);
@ -143,7 +143,7 @@ static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
out_8(fb_regs + WMOVE, fb_bitmask);
}
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY);
}
@ -315,7 +315,7 @@ unmap_screen_base:
return ret;
}
/*
/*
* Check that the secondary ID indicates that we have some hope of working with this
* framebuffer. The catseye boards are pretty much like topcats and we can muddle through.
*/
@ -323,7 +323,7 @@ unmap_screen_base:
#define topcat_sid_ok(x) (((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE) \
|| ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT))
/*
/*
* Initialise the framebuffer
*/
static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)


@ -1347,7 +1347,7 @@ static const struct fb_ops imsttfb_ops = {
.fb_ioctl = imsttfb_ioctl,
};
static void init_imstt(struct fb_info *info)
static int init_imstt(struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info)
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
framebuffer_release(info);
return;
return -ENODEV;
}
sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info)
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
return;
return -ENODEV;
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
info->fix.id, info->fix.smem_len >> 20, tmp);
return 0;
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@ -1529,10 +1530,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!par->cmap_regs)
goto error;
info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
return 0;
ret = init_imstt(info);
if (!ret)
pci_set_drvdata(pdev, info);
return ret;
error:
if (par->dc_regs)
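
Here init_imstt() changes from void to int so its failure paths (unsupported mode, register_framebuffer() failing) report -ENODEV, and the probe only stores driver data when initialization succeeded. A stripped-down sketch of that void-to-int conversion follows; the types and helpers are placeholders, not imsttfb's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fb { int registered; };                                 /* stand-in */

static int register_fb(struct fb *f) { f->registered = 1; return 0; }
static void release_fb(struct fb *f) { free(f); }

/* Formerly a void helper that could fail silently; returning an error
 * code lets the probe path below report the failure. */
static int init_fb(struct fb *f)
{
        if (register_fb(f) < 0) {
                release_fb(f);
                return -ENODEV;
        }
        return 0;
}

static int probe(void)
{
        struct fb *f = calloc(1, sizeof(*f));
        int ret;

        if (!f)
                return -ENOMEM;

        ret = init_fb(f);
        if (!ret)
                printf("probe stored driver data\n");  /* drvdata analogue */
        return ret;       /* 0 on success, the init error otherwise */
}

int main(void)
{
        return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}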


@ -339,7 +339,7 @@ static int civic_setpalette(unsigned int regno, unsigned int red,
{
unsigned long flags;
int clut_status;
local_irq_save(flags);
/* Set the register address */
@ -439,7 +439,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure).
* Return non-zero for invalid regno.
*/
if (regno >= fb_info->cmap.len)
return 1;
@ -548,7 +548,7 @@ static int __init macfb_init(void)
return -ENODEV;
macfb_setup(option);
if (!MACH_IS_MAC)
if (!MACH_IS_MAC)
return -ENODEV;
if (mac_bi_data.id == MAC_MODEL_Q630 ||
@ -644,7 +644,7 @@ static int __init macfb_init(void)
err = -EINVAL;
goto fail_unmap;
}
/*
* We take a wild guess that if the video physical address is
* in nubus slot space, that the nubus card is driving video.
@ -774,7 +774,7 @@ static int __init macfb_init(void)
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
break;
/*
* Assorted weirdos
* We think this may be like the LC II


@ -138,7 +138,7 @@ int __init maxinefb_init(void)
*(volatile unsigned char *)fboff = 0x0;
maxinefb_fix.smem_start = fb_start;
/* erase hardware cursor */
for (i = 0; i < 512; i++) {
maxinefb_ims332_write_register(IMS332_REG_CURSOR_RAM + i,


@ -65,7 +65,7 @@ static const struct fb_ops p9100_ops = {
#define P9100_FB_OFF 0x0UL
/* 3 bits: 2=8bpp 3=16bpp 5=32bpp 7=24bpp */
#define SYS_CONFIG_PIXELSIZE_SHIFT 26
#define SYS_CONFIG_PIXELSIZE_SHIFT 26
#define SCREENPAINT_TIMECTL1_ENABLE_VIDEO 0x20 /* 0 = off, 1 = on */
@ -110,7 +110,7 @@ struct p9100_regs {
u32 vram_xxx[25];
/* Registers for IBM RGB528 Palette */
u32 ramdac_cmap_wridx;
u32 ramdac_cmap_wridx;
u32 ramdac_palette_data;
u32 ramdac_pixel_mask;
u32 ramdac_palette_rdaddr;


@ -52,17 +52,17 @@ struct fb_info_platinum {
__u8 red, green, blue;
} palette[256];
u32 pseudo_palette[16];
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
volatile struct platinum_regs __iomem *platinum_regs;
unsigned long platinum_regs_phys;
__u8 __iomem *frame_buffer;
volatile __u8 __iomem *base_frame_buffer;
unsigned long frame_buffer_phys;
unsigned long total_vram;
int clktype;
int dactype;
@ -133,7 +133,7 @@ static int platinumfb_set_par (struct fb_info *info)
platinum_set_hardware(pinfo);
init = platinum_reg_init[pinfo->vmode-1];
if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8))
offset = 0x10;
@ -214,7 +214,7 @@ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
break;
}
}
return 0;
}
@ -269,7 +269,7 @@ static void platinum_set_hardware(struct fb_info_platinum *pinfo)
struct platinum_regvals *init;
int i;
int vmode, cmode;
vmode = pinfo->vmode;
cmode = pinfo->cmode;
@ -436,7 +436,7 @@ static int read_platinum_sense(struct fb_info_platinum *info)
* This routine takes a user-supplied var, and picks the best vmode/cmode from it.
* It also updates the var structure to the actual mode data obtained
*/
static int platinum_var_to_par(struct fb_var_screeninfo *var,
static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only)
{
@ -478,12 +478,12 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
pinfo->yoffset = 0;
pinfo->vxres = pinfo->xres;
pinfo->vyres = pinfo->yres;
return 0;
}
/*
/*
* Parse user specified options (`video=platinumfb:')
*/
static int __init platinumfb_setup(char *options)
@ -624,7 +624,7 @@ static int platinumfb_probe(struct platform_device* odev)
break;
}
dev_set_drvdata(&odev->dev, info);
rc = platinum_init_fb(info);
if (rc != 0) {
iounmap(pinfo->frame_buffer);
@ -640,9 +640,9 @@ static void platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
unregister_framebuffer (info);
/* Unmap frame buffer and registers */
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
@ -656,7 +656,7 @@ static void platinumfb_remove(struct platform_device* odev)
framebuffer_release(info);
}
static struct of_device_id platinumfb_match[] =
static struct of_device_id platinumfb_match[] =
{
{
.name = "platinum",
@ -664,7 +664,7 @@ static struct of_device_id platinumfb_match[] =
{},
};
static struct platform_driver platinum_driver =
static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",


@ -57,14 +57,14 @@
* - Driver appears to be working for Brutus 320x200x8bpp mode. Other
* resolutions are working, but only the 8bpp mode is supported.
* Changes need to be made to the palette encode and decode routines
* to support 4 and 16 bpp modes.
* to support 4 and 16 bpp modes.
* Driver is not designed to be a module. The FrameBuffer is statically
* allocated since dynamic allocation of a 300k buffer cannot be
* guaranteed.
* allocated since dynamic allocation of a 300k buffer cannot be
* guaranteed.
*
* 1999/06/17:
* - FrameBuffer memory is now allocated at run-time when the
* driver is initialized.
* driver is initialized.
*
* 2000/04/10: Nicolas Pitre <nico@fluxnic.net>
* - Big cleanup for dynamic selection of machine type at run time.
@ -74,8 +74,8 @@
*
* 2000/08/07: Tak-Shing Chan <tchan.rd@idthk.com>
* Jeff Sutherland <jsutherland@accelent.com>
* - Resolved an issue caused by a change made to the Assabet's PLD
* earlier this year which broke the framebuffer driver for newer
* - Resolved an issue caused by a change made to the Assabet's PLD
* earlier this year which broke the framebuffer driver for newer
* Phase 4 Assabets. Some other parameters were changed to optimize
* for the Sharp display.
*
@ -102,7 +102,7 @@
* 2000/11/23: Eric Peng <ericpeng@coventive.com>
* - Freebird add
*
* 2001/02/07: Jamey Hicks <jamey.hicks@compaq.com>
* 2001/02/07: Jamey Hicks <jamey.hicks@compaq.com>
* Cliff Brake <cbrake@accelent.com>
* - Added PM callback
*
@ -500,7 +500,7 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
* the shortest recovery time
* Suspend
* This refers to a level of power management in which substantial power
* reduction is achieved by the display. The display can have a longer
* reduction is achieved by the display. The display can have a longer
* recovery time from this state than from the Stand-by state
* Off
* This indicates that the display is consuming the lowest level of power
@ -522,9 +522,9 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
*/
/*
* sa1100fb_blank():
* Blank the display by setting all palette values to zero. Note, the
* Blank the display by setting all palette values to zero. Note, the
* 12 and 16 bpp modes don't really use the palette, so this will not
* blank the display in all modes.
* blank the display in all modes.
*/
static int sa1100fb_blank(int blank, struct fb_info *info)
{
@ -603,8 +603,8 @@ static inline unsigned int get_pcd(struct sa1100fb_info *fbi,
/*
* sa1100fb_activate_var():
* Configures LCD Controller based on entries in var parameter. Settings are
* only written to the controller if changes were made.
* Configures LCD Controller based on entries in var parameter. Settings are
* only written to the controller if changes were made.
*/
static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *fbi)
{
@ -747,7 +747,7 @@ static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
*
* SA1110 spec update nr. 25 says we can and should
* clear LDD15 to 12 for 4 or 8bpp modes with active
* panels.
* panels.
*/
if ((fbi->reg_lccr0 & LCCR0_CMS) == LCCR0_Color &&
(fbi->reg_lccr0 & (LCCR0_Dual|LCCR0_Act)) != 0) {
@ -1020,9 +1020,9 @@ static int sa1100fb_resume(struct platform_device *dev)
/*
* sa1100fb_map_video_memory():
* Allocates the DRAM memory for the frame buffer. This buffer is
* remapped into a non-cached, non-buffered, memory region to
* allow palette and pixel writes to occur without flushing the
* Allocates the DRAM memory for the frame buffer. This buffer is
* remapped into a non-cached, non-buffered, memory region to
* allow palette and pixel writes to occur without flushing the
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/


@ -1,11 +1,11 @@
/*
* linux/drivers/video/stifb.c -
* Low level Frame buffer driver for HP workstations with
* linux/drivers/video/stifb.c -
* Low level Frame buffer driver for HP workstations with
* STI (standard text interface) video firmware.
*
* Copyright (C) 2001-2006 Helge Deller <deller@gmx.de>
* Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
*
* Based on:
* - linux/drivers/video/artistfb.c -- Artist frame buffer driver
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
@ -14,7 +14,7 @@
* - HP Xhp cfb-based X11 window driver for XFree86
* (c)Copyright 1992 Hewlett-Packard Co.
*
*
*
* The following graphics display devices (NGLE family) are supported by this driver:
*
* HPA4070A known as "HCRX", a 1280x1024 color device with 8 planes
@ -30,7 +30,7 @@
* supports 1280x1024 color displays with 8 planes.
* HP710G same as HP710C, 1280x1024 grayscale only
* HP710L same as HP710C, 1024x768 color only
* HP712 internal graphics support on HP9000s712 SPU, supports 640x480,
* HP712 internal graphics support on HP9000s712 SPU, supports 640x480,
* 1024x768 or 1280x1024 color displays on 8 planes (Artist)
*
* This file is subject to the terms and conditions of the GNU General Public
@ -92,7 +92,7 @@ typedef struct {
__s32 misc_video_end;
} video_setup_t;
typedef struct {
typedef struct {
__s16 sizeof_ngle_data;
__s16 x_size_visible; /* visible screen dim in pixels */
__s16 y_size_visible;
@ -177,10 +177,10 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#endif /* DEBUG_STIFB_REGS */
#define ENABLE 1 /* for enabling/disabling screen */
#define ENABLE 1 /* for enabling/disabling screen */
#define DISABLE 0
#define NGLE_LOCK(fb_info) do { } while (0)
#define NGLE_LOCK(fb_info) do { } while (0)
#define NGLE_UNLOCK(fb_info) do { } while (0)
static void
@ -198,9 +198,9 @@ SETUP_HW(struct stifb_info *fb)
static void
SETUP_FB(struct stifb_info *fb)
{
{
unsigned int reg10_value = 0;
SETUP_HW(fb);
switch (fb->id)
{
@ -210,15 +210,15 @@ SETUP_FB(struct stifb_info *fb)
reg10_value = 0x13601000;
break;
case S9000_ID_A1439A:
if (fb->info.var.bits_per_pixel == 32)
if (fb->info.var.bits_per_pixel == 32)
reg10_value = 0xBBA0A000;
else
else
reg10_value = 0x13601000;
break;
case S9000_ID_HCRX:
if (fb->info.var.bits_per_pixel == 32)
reg10_value = 0xBBA0A000;
else
else
reg10_value = 0x13602000;
break;
case S9000_ID_TIMBER:
@ -243,7 +243,7 @@ START_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb)
}
static void
WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color)
WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color)
{
SETUP_HW(fb);
WRITE_WORD(((0x100+index)<<2), fb, REG_3);
@ -251,30 +251,30 @@ WRITE_IMAGE_COLOR(struct stifb_info *fb, int index, int color)
}
static void
FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb)
{
FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb)
{
WRITE_WORD(0x400, fb, REG_2);
if (fb->info.var.bits_per_pixel == 32) {
WRITE_WORD(0x83000100, fb, REG_1);
} else {
if (fb->id == S9000_ID_ARTIST || fb->id == CRT_ID_VISUALIZE_EG)
WRITE_WORD(0x80000100, fb, REG_26);
else
else
WRITE_WORD(0x80000100, fb, REG_1);
}
SETUP_FB(fb);
}
static void
SETUP_RAMDAC(struct stifb_info *fb)
SETUP_RAMDAC(struct stifb_info *fb)
{
SETUP_HW(fb);
WRITE_WORD(0x04000000, fb, 0x1020);
WRITE_WORD(0xff000000, fb, 0x1028);
}
static void
CRX24_SETUP_RAMDAC(struct stifb_info *fb)
static void
CRX24_SETUP_RAMDAC(struct stifb_info *fb)
{
SETUP_HW(fb);
WRITE_WORD(0x04000000, fb, 0x1000);
@ -286,14 +286,14 @@ CRX24_SETUP_RAMDAC(struct stifb_info *fb)
}
#if 0
static void
static void
HCRX_SETUP_RAMDAC(struct stifb_info *fb)
{
WRITE_WORD(0xffffffff, fb, REG_32);
}
#endif
static void
static void
CRX24_SET_OVLY_MASK(struct stifb_info *fb)
{
SETUP_HW(fb);
@ -314,7 +314,7 @@ ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
WRITE_WORD(value, fb, 0x1038);
}
static void
static void
CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
{
unsigned int value = enable ? 0x10000000 : 0x30000000;
@ -325,11 +325,11 @@ CRX24_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
}
static void
ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
{
u32 DregsMiscVideo = REG_21;
u32 DregsMiscCtl = REG_27;
SETUP_HW(fb);
if (enable) {
WRITE_WORD(READ_WORD(fb, DregsMiscVideo) | 0x0A000000, fb, DregsMiscVideo);
@ -344,7 +344,7 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
(READ_BYTE(fb, REG_16b3) - 1)
#define HYPER_CONFIG_PLANES_24 0x00000100
#define IS_24_DEVICE(fb) \
(fb->deviceSpecificConfig & HYPER_CONFIG_PLANES_24)
@ -470,15 +470,15 @@ SETUP_ATTR_ACCESS(struct stifb_info *fb, unsigned BufferNumber)
}
static void
SET_ATTR_SIZE(struct stifb_info *fb, int width, int height)
SET_ATTR_SIZE(struct stifb_info *fb, int width, int height)
{
/* REG_6 seems to have special values when run on a
/* REG_6 seems to have special values when run on a
RDI precisionbook parisc laptop (INTERNAL_EG_DX1024 or
INTERNAL_EG_X1024). The values are:
0x2f0: internal (LCD) & external display enabled
0x2a0: external display only
0x000: zero on standard artist graphic cards
*/
*/
WRITE_WORD(0x00000000, fb, REG_6);
WRITE_WORD((width<<16) | height, fb, REG_9);
WRITE_WORD(0x05000000, fb, REG_6);
@ -486,7 +486,7 @@ SET_ATTR_SIZE(struct stifb_info *fb, int width, int height)
}
static void
FINISH_ATTR_ACCESS(struct stifb_info *fb)
FINISH_ATTR_ACCESS(struct stifb_info *fb)
{
SETUP_HW(fb);
WRITE_WORD(0x00000000, fb, REG_12);
@ -499,7 +499,7 @@ elkSetupPlanes(struct stifb_info *fb)
SETUP_FB(fb);
}
static void
static void
ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber)
{
SETUP_ATTR_ACCESS(fb, BufferNumber);
@ -519,7 +519,7 @@ rattlerSetupPlanes(struct stifb_info *fb)
* read mask register for overlay planes, not image planes).
*/
CRX24_SETUP_RAMDAC(fb);
/* change fb->id temporarily to fool SETUP_FB() */
saved_id = fb->id;
fb->id = CRX24_OVERLAY_PLANES;
@ -565,7 +565,7 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
lutBltCtl.all = 0x80000000;
lutBltCtl.fields.length = length;
switch (fb->id)
switch (fb->id)
{
case S9000_ID_A1439A: /* CRX24 */
if (fb->var.bits_per_pixel == 8) {
@ -576,12 +576,12 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
lutBltCtl.fields.lutOffset = 0 * 256;
}
break;
case S9000_ID_ARTIST:
lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE;
lutBltCtl.fields.lutOffset = 0 * 256;
break;
default:
lutBltCtl.fields.lutType = NGLE_CMAP_INDEXED0_TYPE;
lutBltCtl.fields.lutOffset = 0;
@ -596,7 +596,7 @@ setNgleLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
#endif
static NgleLutBltCtl
setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
{
NgleLutBltCtl lutBltCtl;
@ -633,7 +633,7 @@ static void hyperUndoITE(struct stifb_info *fb)
/* Hardware setup for full-depth write to "magic" location */
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7);
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
BA(IndexedDcd, Otc04, Ots08, AddrLong,
BAJustPoint(0), BINovly, BAIndexBase(0)));
NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
@ -653,13 +653,13 @@ static void hyperUndoITE(struct stifb_info *fb)
NGLE_UNLOCK(fb);
}
static void
static void
ngleDepth8_ClearImagePlanes(struct stifb_info *fb)
{
/* FIXME! */
}
static void
static void
ngleDepth24_ClearImagePlanes(struct stifb_info *fb)
{
/* FIXME! */
@ -675,7 +675,7 @@ ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg)
NGLE_LOCK(fb);
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 4);
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
BA(IndexedDcd, Otc32, OtsIndirect,
AddrLong, BAJustPoint(0),
BINattr, BAIndexBase(0)));
@ -713,22 +713,22 @@ ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg)
/**** Finally, set the Control Plane Register back to zero: ****/
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 1);
NGLE_QUICK_SET_CTL_PLN_REG(fb, 0);
NGLE_UNLOCK(fb);
}
static void
ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
{
int nFreeFifoSlots = 0;
u32 packed_dst;
u32 packed_len;
NGLE_LOCK(fb);
/* Hardware setup */
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 8);
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
NGLE_QUICK_SET_DST_BM_ACCESS(fb,
BA(IndexedDcd, Otc04, Ots08, AddrLong,
BAJustPoint(0), BINovly, BAIndexBase(0)));
@ -736,23 +736,23 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, data);
NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, mask);
packed_dst = 0;
packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
NGLE_SET_DSTXY(fb, packed_dst);
/* Write zeroes to overlay planes */
/* Write zeroes to overlay planes */
NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
IBOvals(RopSrc, MaskAddrOffset(0),
BitmapExtent08, StaticReg(0),
DataDynamic, MaskOtc, BGx(0), FGx(0)));
SET_LENXY_START_RECFILL(fb, packed_len);
NGLE_UNLOCK(fb);
}
static void
static void
hyperResetPlanes(struct stifb_info *fb, int enable)
{
unsigned int controlPlaneReg;
@ -783,7 +783,7 @@ hyperResetPlanes(struct stifb_info *fb, int enable)
ngleClearOverlayPlanes(fb, 0xff, 255);
/**************************************************
** Also need to counteract ITE settings
** Also need to counteract ITE settings
**************************************************/
hyperUndoITE(fb);
break;
@ -803,13 +803,13 @@ hyperResetPlanes(struct stifb_info *fb, int enable)
ngleResetAttrPlanes(fb, controlPlaneReg);
break;
}
NGLE_UNLOCK(fb);
}
/* Return pointer to in-memory structure holding ELK device-dependent ROM values. */
static void
static void
ngleGetDeviceRomData(struct stifb_info *fb)
{
#if 0
@ -821,7 +821,7 @@ XXX: FIXME: !!!
char *pCard8;
int i;
char *mapOrigin = NULL;
int romTableIdx;
pPackedDevRomData = fb->ngle_rom;
@ -888,7 +888,7 @@ SETUP_HCRX(struct stifb_info *fb)
/* Initialize Hyperbowl registers */
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7);
if (IS_24_DEVICE(fb)) {
hyperbowl = (fb->info.var.bits_per_pixel == 32) ?
HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE :
@ -897,9 +897,9 @@ SETUP_HCRX(struct stifb_info *fb)
/* First write to Hyperbowl must happen twice (bug) */
WRITE_WORD(hyperbowl, fb, REG_40);
WRITE_WORD(hyperbowl, fb, REG_40);
WRITE_WORD(HYPERBOWL_MODE2_8_24, fb, REG_39);
WRITE_WORD(0x014c0148, fb, REG_42); /* Set lut 0 to be the direct color */
WRITE_WORD(0x404c4048, fb, REG_43);
WRITE_WORD(0x034c0348, fb, REG_44);
@ -990,7 +990,7 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
0, /* Offset w/i LUT */
256); /* Load entire LUT */
NGLE_BINC_SET_SRCADDR(fb,
NGLE_LONG_FB_ADDRESS(0, 0x100, 0));
NGLE_LONG_FB_ADDRESS(0, 0x100, 0));
/* 0x100 is same as used in WRITE_IMAGE_COLOR() */
START_COLORMAPLOAD(fb, lutBltCtl.all);
SETUP_FB(fb);
@ -1028,7 +1028,7 @@ stifb_blank(int blank_mode, struct fb_info *info)
ENABLE_DISABLE_DISPLAY(fb, enable);
break;
}
SETUP_FB(fb);
return 0;
}
@ -1114,15 +1114,15 @@ stifb_init_display(struct stifb_info *fb)
/* HCRX specific initialization */
SETUP_HCRX(fb);
/*
if (id == S9000_ID_HCRX)
hyperInitSprite(fb);
else
ngleInitSprite(fb);
*/
/* Initialize the image planes. */
/* Initialize the image planes. */
switch (id) {
case S9000_ID_HCRX:
hyperResetPlanes(fb, ENABLE);
@ -1194,7 +1194,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fb = kzalloc(sizeof(*fb), GFP_ATOMIC);
if (!fb)
return -ENOMEM;
info = &fb->info;
/* set struct to a known state */
@ -1235,7 +1235,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
dev_name, fb->id);
goto out_err0;
}
/* default to 8 bpp on most graphic chips */
bpp = 8;
xres = sti_onscreen_x(fb->sti);
@ -1256,7 +1256,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fb->id = S9000_ID_A1659A;
break;
case S9000_ID_TIMBER: /* HP9000/710 Any (may be a grayscale device) */
if (strstr(dev_name, "GRAYSCALE") ||
if (strstr(dev_name, "GRAYSCALE") ||
strstr(dev_name, "Grayscale") ||
strstr(dev_name, "grayscale"))
var->grayscale = 1;
@ -1295,16 +1295,16 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
case CRT_ID_VISUALIZE_EG:
case S9000_ID_ARTIST: /* Artist */
break;
default:
default:
#ifdef FALLBACK_TO_1BPP
printk(KERN_WARNING
printk(KERN_WARNING
"stifb: Unsupported graphics card (id=0x%08x) "
"- now trying 1bpp mode instead\n",
fb->id);
bpp = 1; /* default to 1 bpp */
break;
#else
printk(KERN_WARNING
printk(KERN_WARNING
"stifb: Unsupported graphics card (id=0x%08x) "
"- skipping.\n",
fb->id);
@ -1320,11 +1320,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fix->line_length = (fb->sti->glob_cfg->total_x * bpp) / 8;
if (!fix->line_length)
fix->line_length = 2048; /* default */
/* limit fbsize to max visible screen size */
if (fix->smem_len > yres*fix->line_length)
fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024);
fix->accel = FB_ACCEL_NONE;
switch (bpp) {
@ -1350,7 +1350,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
default:
break;
}
var->xres = var->xres_virtual = xres;
var->yres = var->yres_virtual = yres;
var->bits_per_pixel = bpp;
@ -1379,7 +1379,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fix->smem_start, fix->smem_start+fix->smem_len);
goto out_err2;
}
if (!request_mem_region(fix->mmio_start, fix->mmio_len, "stifb mmio")) {
printk(KERN_ERR "stifb: cannot reserve sti mmio region 0x%04lx-0x%04lx\n",
fix->mmio_start, fix->mmio_start+fix->mmio_len);
@ -1393,11 +1393,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
var->xres,
var->yres,
var->bits_per_pixel,
dev_name,
fb->id,
fb->id,
fix->mmio_start);
return 0;
@ -1413,6 +1413,7 @@ out_err1:
iounmap(info->screen_base);
out_err0:
kfree(fb);
sti->info = NULL;
return -ENXIO;
}
@ -1426,7 +1427,7 @@ static int __init stifb_init(void)
struct sti_struct *sti;
struct sti_struct *def_sti;
int i;
#ifndef MODULE
char *option = NULL;
@ -1438,7 +1439,7 @@ static int __init stifb_init(void)
printk(KERN_INFO "stifb: disabled by \"stifb=off\" kernel parameter\n");
return -ENXIO;
}
def_sti = sti_get_rom(0);
if (def_sti) {
for (i = 1; i <= MAX_STI_ROMS; i++) {
@ -1472,7 +1473,7 @@ stifb_cleanup(void)
{
struct sti_struct *sti;
int i;
for (i = 1; i <= MAX_STI_ROMS; i++) {
sti = sti_get_rom(i);
if (!sti)
@ -1495,10 +1496,10 @@ int __init
stifb_setup(char *options)
{
int i;
if (!options || !*options)
return 1;
if (strncmp(options, "off", 3) == 0) {
stifb_disabled = 1;
options += 3;


@ -1,7 +1,7 @@
/*
* valkyriefb.c -- frame buffer device for the PowerMac 'valkyrie' display
*
* Created 8 August 1998 by
* Created 8 August 1998 by
* Martin Costabel <costabel@wanadoo.fr> and Kevin Schoedel
*
* Vmode-switching changes and vmode 15/17 modifications created 29 August
@ -77,13 +77,13 @@ struct fb_info_valkyrie {
struct fb_par_valkyrie par;
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
struct valkyrie_regs __iomem *valkyrie_regs;
unsigned long valkyrie_regs_phys;
__u8 __iomem *frame_buffer;
unsigned long frame_buffer_phys;
int sense;
unsigned long total_vram;
@ -244,7 +244,7 @@ static inline int valkyrie_vram_reqd(int video_mode, int color_mode)
{
int pitch;
struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1];
if ((pitch = init->pitch[color_mode]) == 0)
pitch = 2 * init->pitch[0];
return init->vres * pitch;
@ -467,7 +467,7 @@ static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
printk(KERN_ERR "valkyriefb: vmode %d not valid.\n", vmode);
return -EINVAL;
}
if (cmode != CMODE_8 && cmode != CMODE_16) {
printk(KERN_ERR "valkyriefb: cmode %d not valid.\n", cmode);
return -EINVAL;
@ -516,7 +516,7 @@ static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valk
fix->ywrapstep = 0;
fix->ypanstep = 0;
fix->xpanstep = 0;
}
/* Fix must already be inited above */


@ -111,7 +111,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
* data from it to check this var.
*/
static int vfb_check_var(struct fb_var_screeninfo *var,
@ -169,7 +169,7 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
/*
* Now that we checked it we alter var. The reason being is that the video
* mode passed in might not work but slight changes to it might make it
* mode passed in might not work but slight changes to it might make it
* work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
@ -235,8 +235,8 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
}
/* This routine actually sets the video mode. It's in here where we
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
*/
static int vfb_set_par(struct fb_info *info)
{
@ -379,7 +379,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
}
/*
* Most drivers don't need their own mmap function
* Most drivers don't need their own mmap function
*/
static int vfb_mmap(struct fb_info *info,


@ -45,7 +45,8 @@ static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
int root_count;
bool cached;
if (!btrfs_file_extent_compression(eb, fi) &&
if (!ctx->ignore_extent_item_pos &&
!btrfs_file_extent_compression(eb, fi) &&
!btrfs_file_extent_encryption(eb, fi) &&
!btrfs_file_extent_other_encoding(eb, fi)) {
u64 data_offset;
@ -552,7 +553,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
count++;
else
goto next;
if (!ctx->ignore_extent_item_pos) {
if (!ctx->skip_inode_ref_list) {
ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
ret < 0)
@ -564,7 +565,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
eie, (void **)&old, GFP_NOFS);
if (ret < 0)
break;
if (!ret && !ctx->ignore_extent_item_pos) {
if (!ret && !ctx->skip_inode_ref_list) {
while (old->next)
old = old->next;
old->next = eie;
@ -1606,7 +1607,7 @@ again:
goto out;
}
if (ref->count && ref->parent) {
if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
if (!ctx->skip_inode_ref_list && !ref->inode_list &&
ref->level == 0) {
struct btrfs_tree_parent_check check = { 0 };
struct extent_buffer *eb;
@ -1647,7 +1648,7 @@ again:
(void **)&eie, GFP_NOFS);
if (ret < 0)
goto out;
if (!ret && !ctx->ignore_extent_item_pos) {
if (!ret && !ctx->skip_inode_ref_list) {
/*
* We've recorded that parent, so we must extend
* its inode list here.
@ -1743,7 +1744,7 @@ int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
const u64 orig_bytenr = ctx->bytenr;
const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos;
const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
bool roots_ulist_allocated = false;
struct ulist_iterator uiter;
int ret = 0;
@ -1764,7 +1765,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
roots_ulist_allocated = true;
}
ctx->ignore_extent_item_pos = true;
ctx->skip_inode_ref_list = true;
ULIST_ITER_INIT(&uiter);
while (1) {
@ -1789,7 +1790,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
ulist_free(ctx->refs);
ctx->refs = NULL;
ctx->bytenr = orig_bytenr;
ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos;
ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
return ret;
}
@ -1912,7 +1913,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
goto out_trans;
}
walk_ctx.ignore_extent_item_pos = true;
walk_ctx.skip_inode_ref_list = true;
walk_ctx.trans = trans;
walk_ctx.fs_info = fs_info;
walk_ctx.refs = &ctx->refs;
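
The btrfs hunks rename ignore_extent_item_pos to skip_inode_ref_list, a closer description of what the flag suppresses, and keep the existing save/force/restore handling around the roots walk. Below is a small sketch of that flag save/restore pattern on a cut-down context struct (not the real btrfs_backref_walk_ctx).

#include <stdbool.h>
#include <stdio.h>

struct walk_ctx {
        bool skip_inode_ref_list;   /* only the flag handling is shown */
};

static void walk_one_root(struct walk_ctx *ctx, int root)
{
        printf("root %d: skip_inode_ref_list=%d\n",
               root, ctx->skip_inode_ref_list);
}

/* Mirror of the find-all-roots pattern: remember the caller's flag, force
 * it on for the internal iteration (inode/ref lists are not needed when
 * only collecting roots), and restore it before returning. */
static void find_all_roots(struct walk_ctx *ctx)
{
        const bool orig_skip = ctx->skip_inode_ref_list;
        int root;

        ctx->skip_inode_ref_list = true;
        for (root = 1; root <= 3; root++)
                walk_one_root(ctx, root);

        ctx->skip_inode_ref_list = orig_skip;
}

int main(void)
{
        struct walk_ctx ctx = { .skip_inode_ref_list = false };

        find_all_roots(&ctx);
        printf("restored flag=%d\n", ctx.skip_inode_ref_list);
        return 0;
}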

Some files were not shown because too many files have changed in this diff.