Merge branch 'akpm'

* akpm: (173 commits)
  genalloc: use bitmap_find_next_zero_area
  ia64: use bitmap_find_next_zero_area
  sparc: use bitmap_find_next_zero_area
  mlx4: use bitmap_find_next_zero_area
  isp1362-hcd: use bitmap_find_next_zero_area
  iommu-helper: use bitmap library
  bitmap: introduce bitmap_set, bitmap_clear, bitmap_find_next_zero_area
  qnx4: use hweight8
  qnx4fs: remove remains of the (defunct) write support
  resource: constify arg to resource_size() and resource_type()
  gru: send cross partition interrupts using the gru
  gru: function to generate chipset IPI values
  gru: update driver version number
  gru: improve GRU TLB dropin statistics
  gru: fix GRU interrupt race at deallocate
  gru: add hugepage support
  gru: fix bug in allocation of kernel contexts
  gru: update GRU structures to match latest hardware spec
  gru: check for correct GRU chiplet assignment
  gru: remove stray local_irq_enable
  ...
Linus Torvalds, 2009-12-16 10:06:39 -08:00
commit 9cfc86249f
216 changed files with 7063 additions and 3290 deletions

@@ -8,7 +8,7 @@
DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
procfs-guide.xml writing_usb_driver.xml networking.xml \
writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
@@ -65,7 +65,7 @@ KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/basic/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
#XMLTOFLAGS += --skip-validation
XMLTOFLAGS += --skip-validation
###
# DOCPROC is used for two purposes:
@@ -101,17 +101,6 @@ endif
# Changes in kernel-doc force a rebuild of all documentation
$(BOOKS): $(KERNELDOC)
###
# procfs guide uses a .c file as example code.
# This requires an explicit dependency
C-procfs-example = procfs_example.xml
C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
$(obj)/procfs-guide.xml: $(C-procfs-example2)
# List of programs to build
##oops, this is a kernel module::hostprogs-y := procfs_example
obj-m += procfs_example.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -238,7 +227,7 @@ clean-files := $(DOCBOOKS) \
$(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
$(patsubst %.xml, %.html, $(DOCBOOKS)) \
$(patsubst %.xml, %.9, $(DOCBOOKS)) \
$(C-procfs-example) $(index)
$(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man

@@ -1,626 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
<!ENTITY procfsexample SYSTEM "procfs_example.xml">
]>
<book id="LKProcfsGuide">
<bookinfo>
<title>Linux Kernel Procfs Guide</title>
<authorgroup>
<author>
<firstname>Erik</firstname>
<othername>(J.A.K.)</othername>
<surname>Mouw</surname>
<affiliation>
<address>
<email>mouw@nl.linux.org</email>
</address>
</affiliation>
</author>
<othercredit>
<contrib>
This software and documentation were written while working on the
LART computing board
(<ulink url="http://www.lartmaker.nl/">http://www.lartmaker.nl/</ulink>),
which was sponsored by the Delft University of Technology projects
Mobile Multi-media Communications and Ubiquitous Communications.
</contrib>
</othercredit>
</authorgroup>
<revhistory>
<revision>
<revnumber>1.0</revnumber>
<date>May 30, 2001</date>
<revremark>Initial revision posted to linux-kernel</revremark>
</revision>
<revision>
<revnumber>1.1</revnumber>
<date>June 3, 2001</date>
<revremark>Revised after comments from linux-kernel</revremark>
</revision>
</revhistory>
<copyright>
<year>2001</year>
<holder>Erik Mouw</holder>
</copyright>
<legalnotice>
<para>
This documentation is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later
version.
</para>
<para>
This documentation is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
</para>
<para>
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
</para>
<para>
For more details see the file COPYING in the source
distribution of Linux.
</para>
</legalnotice>
</bookinfo>
<toc>
</toc>
<preface id="Preface">
<title>Preface</title>
<para>
This guide describes the use of the procfs file system from
within the Linux kernel. The idea to write this guide came up on
the #kernelnewbies IRC channel (see <ulink
url="http://www.kernelnewbies.org/">http://www.kernelnewbies.org/</ulink>),
when Jeff Garzik explained the use of procfs and forwarded me a
message Alexander Viro wrote to the linux-kernel mailing list. I
agreed to write it up nicely, so here it is.
</para>
<para>
I'd like to thank Jeff Garzik
<email>jgarzik@pobox.com</email> and Alexander Viro
<email>viro@parcelfarce.linux.theplanet.co.uk</email> for their input,
Tim Waugh <email>twaugh@redhat.com</email> for his <ulink
url="http://people.redhat.com/twaugh/docbook/selfdocbook/">Selfdocbook</ulink>,
and Marc Joosen <email>marcj@historia.et.tudelft.nl</email> for
proofreading.
</para>
<para>
Erik
</para>
</preface>
<chapter id="intro">
<title>Introduction</title>
<para>
The <filename class="directory">/proc</filename> file system
(procfs) is a special file system in the linux kernel. It's a
virtual file system: it is not associated with a block device
but exists only in memory. The files in the procfs are there to
allow userland programs access to certain information from the
kernel (like process information in <filename
class="directory">/proc/[0-9]+/</filename>), but also for debug
purposes (like <filename>/proc/ksyms</filename>).
</para>
<para>
This guide describes the use of the procfs file system from
within the Linux kernel. It starts by introducing all relevant
functions to manage the files within the file system. After that
it shows how to communicate with userland, and some tips and
tricks will be pointed out. Finally a complete example will be
shown.
</para>
<para>
Note that the files in <filename
class="directory">/proc/sys</filename> are sysctl files: they
don't belong to procfs and are governed by a completely
different API described in the Kernel API book.
</para>
</chapter>
<chapter id="managing">
<title>Managing procfs entries</title>
<para>
This chapter describes the functions that various kernel
components use to populate the procfs with files, symlinks,
device nodes, and directories.
</para>
<para>
A minor note before we start: if you want to use any of the
procfs functions, be sure to include the correct header file!
This should be one of the first lines in your code:
</para>
<programlisting>
#include &lt;linux/proc_fs.h&gt;
</programlisting>
<sect1 id="regularfile">
<title>Creating a regular file</title>
<funcsynopsis>
<funcprototype>
<funcdef>struct proc_dir_entry* <function>create_proc_entry</function></funcdef>
<paramdef>const char* <parameter>name</parameter></paramdef>
<paramdef>mode_t <parameter>mode</parameter></paramdef>
<paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
This function creates a regular file with the name
<parameter>name</parameter>, file mode
<parameter>mode</parameter> in the directory
<parameter>parent</parameter>. To create a file in the root of
the procfs, use <constant>NULL</constant> as
<parameter>parent</parameter> parameter. When successful, the
function will return a pointer to the freshly created
<structname>struct proc_dir_entry</structname>; otherwise it
will return <constant>NULL</constant>. <xref
linkend="userland"/> describes how to do something useful with
regular files.
</para>
<para>
Note that passing a path that spans multiple directories is
explicitly supported. For example
<function>create_proc_entry</function>(<parameter>"drivers/via0/info"</parameter>)
will create the <filename class="directory">via0</filename>
directory if necessary, with standard
<constant>0755</constant> permissions.
</para>
<para>
If you only want to be able to read the file, the function
<function>create_proc_read_entry</function> described in <xref
linkend="convenience"/> may be used to create and initialise
the procfs entry in one single call.
</para>
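<para>
For illustration, a minimal sketch that creates
<filename>/proc/info</filename> (the entry name and error handling
are assumptions, not taken from a real driver):
</para>
<programlisting>
struct proc_dir_entry *entry;

entry = create_proc_entry("info", 0644, NULL);
if (entry == NULL)
        return -ENOMEM;
</programlisting>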
</sect1>
<sect1 id="Creating_a_symlink">
<title>Creating a symlink</title>
<funcsynopsis>
<funcprototype>
<funcdef>struct proc_dir_entry*
<function>proc_symlink</function></funcdef> <paramdef>const
char* <parameter>name</parameter></paramdef>
<paramdef>struct proc_dir_entry*
<parameter>parent</parameter></paramdef> <paramdef>const
char* <parameter>dest</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
This creates a symlink in the procfs directory
<parameter>parent</parameter> that points from
<parameter>name</parameter> to
<parameter>dest</parameter>. This translates in userland to
<literal>ln -s</literal> <parameter>dest</parameter>
<parameter>name</parameter>.
</para>
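<para>
A short sketch continuing the example above; in userland this
is the equivalent of <literal>ln -s info info_too</literal> in
<filename class="directory">/proc</filename>:
</para>
<programlisting>
struct proc_dir_entry *link;

link = proc_symlink("info_too", NULL, "info");
</programlisting>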
</sect1>
<sect1 id="Creating_a_directory">
<title>Creating a directory</title>
<funcsynopsis>
<funcprototype>
<funcdef>struct proc_dir_entry* <function>proc_mkdir</function></funcdef>
<paramdef>const char* <parameter>name</parameter></paramdef>
<paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
Create a directory <parameter>name</parameter> in the procfs
directory <parameter>parent</parameter>.
</para>
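<para>
For example, to create <filename
class="directory">/proc/example</filename>:
</para>
<programlisting>
struct proc_dir_entry *example_dir;

example_dir = proc_mkdir("example", NULL);
if (example_dir == NULL)
        return -ENOMEM;
</programlisting>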
</sect1>
<sect1 id="Removing_an_entry">
<title>Removing an entry</title>
<funcsynopsis>
<funcprototype>
<funcdef>void <function>remove_proc_entry</function></funcdef>
<paramdef>const char* <parameter>name</parameter></paramdef>
<paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
Removes the entry <parameter>name</parameter> in the directory
<parameter>parent</parameter> from the procfs. Entries are
removed by their <emphasis>name</emphasis>, not by the
<structname>struct proc_dir_entry</structname> returned by the
various create functions. Note that this function doesn't
recursively remove entries.
</para>
<para>
Be sure to free the <structfield>data</structfield> entry from
the <structname>struct proc_dir_entry</structname> before
<function>remove_proc_entry</function> is called (that is: if
there was some <structfield>data</structfield> allocated, of
course). See <xref linkend="usingdata"/> for more information
on using the <structfield>data</structfield> entry.
</para>
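<para>
A sketch of the removal order just described, cleaning up the
entries from the previous examples (assuming
<structfield>data</structfield> was allocated for the file):
</para>
<programlisting>
kfree(entry->data);  /* free private data before removal */
remove_proc_entry("info_too", NULL);
remove_proc_entry("info", NULL);
remove_proc_entry("example", NULL);
</programlisting>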
</sect1>
</chapter>
<chapter id="userland">
<title>Communicating with userland</title>
<para>
Instead of reading (or writing) information directly from
kernel memory, procfs works with <emphasis>call back
functions</emphasis> for files: functions that are called when
a specific file is being read or written. Such functions have
to be initialised after the procfs file is created by setting
the <structfield>read_proc</structfield> and/or
<structfield>write_proc</structfield> fields in the
<structname>struct proc_dir_entry*</structname> that the
function <function>create_proc_entry</function> returned:
</para>
<programlisting>
struct proc_dir_entry* entry;
entry->read_proc = read_proc_foo;
entry->write_proc = write_proc_foo;
</programlisting>
<para>
If you only want to use the
<structfield>read_proc</structfield>, the function
<function>create_proc_read_entry</function> described in <xref
linkend="convenience"/> may be used to create and initialise the
procfs entry in one single call.
</para>
<sect1 id="Reading_data">
<title>Reading data</title>
<para>
The read function is a call back function that allows userland
processes to read data from the kernel. The read function
should have the following format:
</para>
<funcsynopsis>
<funcprototype>
<funcdef>int <function>read_func</function></funcdef>
<paramdef>char* <parameter>buffer</parameter></paramdef>
<paramdef>char** <parameter>start</parameter></paramdef>
<paramdef>off_t <parameter>off</parameter></paramdef>
<paramdef>int <parameter>count</parameter></paramdef>
<paramdef>int* <parameter>peof</parameter></paramdef>
<paramdef>void* <parameter>data</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
The read function should write its information into the
<parameter>buffer</parameter>, which will be exactly
<literal>PAGE_SIZE</literal> bytes long.
</para>
<para>
The parameter
<parameter>peof</parameter> should be used to signal that the
end of the file has been reached by writing
<literal>1</literal> to the memory location
<parameter>peof</parameter> points to.
</para>
<para>
The <parameter>data</parameter>
parameter can be used to create a single call back function for
several files, see <xref linkend="usingdata"/>.
</para>
<para>
The rest of the parameters and the return value are described
by a comment in <filename>fs/proc/generic.c</filename> as follows:
</para>
<blockquote>
<para>
You have three ways to return data:
</para>
<orderedlist>
<listitem>
<para>
Leave <literal>*start = NULL</literal>. (This is the default.)
Put the data of the requested offset at that
offset within the buffer. Return the number (<literal>n</literal>)
of bytes there are from the beginning of the
buffer up to the last byte of data. If the
number of supplied bytes (<literal>= n - offset</literal>) is
greater than zero and you didn't signal eof
and the reader is prepared to take more data
you will be called again with the requested
offset advanced by the number of bytes
absorbed. This interface is useful for files
no larger than the buffer.
</para>
</listitem>
<listitem>
<para>
Set <literal>*start</literal> to an unsigned long value less than
the buffer address but greater than zero.
Put the data of the requested offset at the
beginning of the buffer. Return the number of
bytes of data placed there. If this number is
greater than zero and you didn't signal eof
and the reader is prepared to take more data
you will be called again with the requested
offset advanced by <literal>*start</literal>. This interface is
useful when you have a large file consisting
of a series of blocks which you want to count
and return as wholes.
(Hack by Paul.Russell@rustcorp.com.au)
</para>
</listitem>
<listitem>
<para>
Set <literal>*start</literal> to an address within the buffer.
Put the data of the requested offset at <literal>*start</literal>.
Return the number of bytes of data placed there.
If this number is greater than zero and you
didn't signal eof and the reader is prepared to
take more data you will be called again with the
requested offset advanced by the number of bytes
absorbed.
</para>
</listitem>
</orderedlist>
</blockquote>
<para>
<xref linkend="example"/> shows how to use a read call back
function.
</para>
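<para>
A minimal sketch of the first variant, for a file whose contents
always fit in the one-page buffer (the message is an assumption):
</para>
<programlisting>
static int read_info(char *buffer, char **start, off_t off,
                     int count, int *peof, void *data)
{
        int len;

        /* leave *start NULL, write from the start of the
         * buffer, and signal end of file */
        len = sprintf(buffer, "hello from the kernel\n");
        *peof = 1;
        return len;
}
</programlisting>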
</sect1>
<sect1 id="Writing_data">
<title>Writing data</title>
<para>
The write call back function allows a userland process to write
data to the kernel, so it has some kind of control over the
kernel. The write function should have the following format:
</para>
<funcsynopsis>
<funcprototype>
<funcdef>int <function>write_func</function></funcdef>
<paramdef>struct file* <parameter>file</parameter></paramdef>
<paramdef>const char* <parameter>buffer</parameter></paramdef>
<paramdef>unsigned long <parameter>count</parameter></paramdef>
<paramdef>void* <parameter>data</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
The write function should read at most <parameter>count</parameter>
bytes from the <parameter>buffer</parameter>. Note
that the <parameter>buffer</parameter> doesn't live in the
kernel's memory space, so it should first be copied to kernel
space with <function>copy_from_user</function>. The
<parameter>file</parameter> parameter is usually
ignored. <xref linkend="usingdata"/> shows how to use the
<parameter>data</parameter> parameter.
</para>
<para>
Again, <xref linkend="example"/> shows how to use this call back
function.
</para>
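<para>
A condensed sketch (the buffer size is an assumption; the complete
version appears in <xref linkend="example"/>):
</para>
<programlisting>
static char kbuf[16];

static int write_info(struct file *file, const char *buffer,
                      unsigned long count, void *data)
{
        unsigned long len = count;

        if (len > sizeof(kbuf) - 1)
                len = sizeof(kbuf) - 1;
        /* the buffer lives in user space, copy it in first */
        if (copy_from_user(kbuf, buffer, len))
                return -EFAULT;
        kbuf[len] = '\0';
        return len;
}
</programlisting>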
</sect1>
<sect1 id="usingdata">
<title>A single call back for many files</title>
<para>
When a large number of almost identical files is used, it's
quite inconvenient to use a separate call back function for
each file. A better approach is to have a single call back
function that distinguishes between the files by using the
<structfield>data</structfield> field in <structname>struct
proc_dir_entry</structname>. First of all, the
<structfield>data</structfield> field has to be initialised:
</para>
<programlisting>
struct proc_dir_entry* entry;
struct my_file_data *file_data;
file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
entry->data = file_data;
</programlisting>
<para>
The <structfield>data</structfield> field is a <type>void
*</type>, so it can be initialised with anything.
</para>
<para>
Now that the <structfield>data</structfield> field is set, the
<function>read_proc</function> and
<function>write_proc</function> can use it to distinguish
between files because they get it passed into their
<parameter>data</parameter> parameter:
</para>
<programlisting>
int foo_read_func(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len;
if(data == file_data) {
/* special case for this file */
} else {
/* normal processing */
}
return len;
}
</programlisting>
<para>
Be sure to free the <structfield>data</structfield> field
when removing the procfs entry.
</para>
</sect1>
</chapter>
<chapter id="tips">
<title>Tips and tricks</title>
<sect1 id="convenience">
<title>Convenience functions</title>
<funcsynopsis>
<funcprototype>
<funcdef>struct proc_dir_entry* <function>create_proc_read_entry</function></funcdef>
<paramdef>const char* <parameter>name</parameter></paramdef>
<paramdef>mode_t <parameter>mode</parameter></paramdef>
<paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
<paramdef>read_proc_t* <parameter>read_proc</parameter></paramdef>
<paramdef>void* <parameter>data</parameter></paramdef>
</funcprototype>
</funcsynopsis>
<para>
This function creates a regular file in exactly the same way
as <function>create_proc_entry</function> from <xref
linkend="regularfile"/> does, but also allows to set the read
function <parameter>read_proc</parameter> in one call. This
function can set the <parameter>data</parameter> as well, like
explained in <xref linkend="usingdata"/>.
</para>
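<para>
For example, the jiffies file from <xref linkend="example"/> is
created and initialised this way:
</para>
<programlisting>
create_proc_read_entry("jiffies", 0444, example_dir,
                       proc_read_jiffies, NULL);
</programlisting>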
</sect1>
<sect1 id="Modules">
<title>Modules</title>
<para>
If procfs is being used from within a module, be sure to set
the <structfield>owner</structfield> field in the
<structname>struct proc_dir_entry</structname> to
<constant>THIS_MODULE</constant>.
</para>
<programlisting>
struct proc_dir_entry* entry;
entry->owner = THIS_MODULE;
</programlisting>
</sect1>
<sect1 id="Mode_and_ownership">
<title>Mode and ownership</title>
<para>
Sometimes it is useful to change the mode and/or ownership of
a procfs entry. Here is an example that shows how to achieve
that:
</para>
<programlisting>
struct proc_dir_entry* entry;
entry->mode = S_IWUSR |S_IRUSR | S_IRGRP | S_IROTH;
entry->uid = 0;
entry->gid = 100;
</programlisting>
</sect1>
</chapter>
<chapter id="example">
<title>Example</title>
<!-- be careful with the example code: it shouldn't be wider than
approx. 60 columns, or otherwise it won't fit properly on a page
-->
&procfsexample;
</chapter>
</book>

@@ -1,201 +0,0 @@
/*
* procfs_example.c: an example proc interface
*
* Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
*
* This file accompanies the procfs-guide in the Linux kernel
* source. Its main use is to demonstrate the concepts and
* functions described in the guide.
*
* This software has been developed while working on the LART
* computing board (http://www.lartmaker.nl), which was sponsored
* by the Delft University of Technology projects Mobile Multi-media
* Communications and Ubiquitous Communications.
*
* This program is free software; you can redistribute
* it and/or modify it under the terms of the GNU General
* Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place,
* Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/jiffies.h>
#include <asm/uaccess.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "procfs_example"
#define FOOBAR_LEN 8
struct fb_data_t {
char name[FOOBAR_LEN + 1];
char value[FOOBAR_LEN + 1];
};
static struct proc_dir_entry *example_dir, *foo_file,
*bar_file, *jiffies_file, *symlink;
struct fb_data_t foo_data, bar_data;
static int proc_read_jiffies(char *page, char **start,
off_t off, int count,
int *eof, void *data)
{
int len;
len = sprintf(page, "jiffies = %ld\n",
jiffies);
return len;
}
static int proc_read_foobar(char *page, char **start,
off_t off, int count,
int *eof, void *data)
{
int len;
struct fb_data_t *fb_data = (struct fb_data_t *)data;
/* DON'T DO THAT - buffer overruns are bad */
len = sprintf(page, "%s = '%s'\n",
fb_data->name, fb_data->value);
return len;
}
static int proc_write_foobar(struct file *file,
const char *buffer,
unsigned long count,
void *data)
{
int len;
struct fb_data_t *fb_data = (struct fb_data_t *)data;
if(count > FOOBAR_LEN)
len = FOOBAR_LEN;
else
len = count;
if(copy_from_user(fb_data->value, buffer, len))
return -EFAULT;
fb_data->value[len] = '\0';
return len;
}
static int __init init_procfs_example(void)
{
int rv = 0;
/* create directory */
example_dir = proc_mkdir(MODULE_NAME, NULL);
if(example_dir == NULL) {
rv = -ENOMEM;
goto out;
}
/* create jiffies using convenience function */
jiffies_file = create_proc_read_entry("jiffies",
0444, example_dir,
proc_read_jiffies,
NULL);
if(jiffies_file == NULL) {
rv = -ENOMEM;
goto no_jiffies;
}
/* create foo and bar files using same callback
* functions
*/
foo_file = create_proc_entry("foo", 0644, example_dir);
if(foo_file == NULL) {
rv = -ENOMEM;
goto no_foo;
}
strcpy(foo_data.name, "foo");
strcpy(foo_data.value, "foo");
foo_file->data = &foo_data;
foo_file->read_proc = proc_read_foobar;
foo_file->write_proc = proc_write_foobar;
bar_file = create_proc_entry("bar", 0644, example_dir);
if(bar_file == NULL) {
rv = -ENOMEM;
goto no_bar;
}
strcpy(bar_data.name, "bar");
strcpy(bar_data.value, "bar");
bar_file->data = &bar_data;
bar_file->read_proc = proc_read_foobar;
bar_file->write_proc = proc_write_foobar;
/* create symlink */
symlink = proc_symlink("jiffies_too", example_dir,
"jiffies");
if(symlink == NULL) {
rv = -ENOMEM;
goto no_symlink;
}
/* everything OK */
printk(KERN_INFO "%s %s initialised\n",
MODULE_NAME, MODULE_VERS);
return 0;
no_symlink:
remove_proc_entry("bar", example_dir);
no_bar:
remove_proc_entry("foo", example_dir);
no_foo:
remove_proc_entry("jiffies", example_dir);
no_jiffies:
remove_proc_entry(MODULE_NAME, NULL);
out:
return rv;
}
static void __exit cleanup_procfs_example(void)
{
remove_proc_entry("jiffies_too", example_dir);
remove_proc_entry("bar", example_dir);
remove_proc_entry("foo", example_dir);
remove_proc_entry("jiffies", example_dir);
remove_proc_entry(MODULE_NAME, NULL);
printk(KERN_INFO "%s %s removed\n",
MODULE_NAME, MODULE_VERS);
}
module_init(init_procfs_example);
module_exit(cleanup_procfs_example);
MODULE_AUTHOR("Erik Mouw");
MODULE_DESCRIPTION("procfs examples");
MODULE_LICENSE("GPL");

@@ -15,7 +15,7 @@ kernel patches.
2: Passes allnoconfig, allmodconfig
3: Builds on multiple CPU architectures by using local cross-compile tools
or something like PLM at OSDL.
or some other build farm.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
@@ -88,3 +88,6 @@ kernel patches.
24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
source code that explains the logic of what they are doing and why.
25: If any ioctl's are added by the patch, then also update
Documentation/ioctl/ioctl-number.txt.

@@ -7,7 +7,7 @@
VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
P4M800CE / P4M800Pro / CN700 / VN800,
CX700 / VX700, K8M890, P4M890,
CN896 / P4M900, VX800)
CN896 / P4M900, VX800, VX855)
[Driver features]
------------------------
@@ -154,13 +154,6 @@
0 : No Dual Edge Panel (default)
1 : Dual Edge Panel
viafb_video_dev:
This option is used to specify video output devices(CRT, DVI, LCD) for
duoview case.
For example:
To output video on DVI, we should use:
modprobe viafb viafb_video_dev=DVI...
viafb_lcd_port:
This option is used to specify LCD output port,
available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
@@ -181,9 +174,6 @@ Notes:
and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
instead of calling common ioctl function FBIOPUT_VSCREENINFO since
viafb doesn't support multi-head well, or it will cause screen corruption.
4. VX800 2D accelerator hasn't been supported in this driver yet. When
using driver on VX800, the driver will disable the acceleration
function as default.
[Configure viafb with "fbset" tool]

@@ -248,9 +248,7 @@ code, that is done in the initialization code in the usual way:
{
struct proc_dir_entry *entry;
entry = create_proc_entry("sequence", 0, NULL);
if (entry)
entry->proc_fops = &ct_file_ops;
proc_create("sequence", 0, NULL, &ct_file_ops);
return 0;
}

@@ -531,6 +531,13 @@ and have the following read/write attributes:
This file exists only if the pin can be configured as an
interrupt generating input pin.
"active_low" ... reads as either 0 (false) or 1 (true). Write
any nonzero value to invert the value attribute both
for reading and writing. Existing and subsequent
poll(2) edge configuration via the edge attribute
("rising" or "falling") will follow this setting.
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
@@ -566,6 +573,8 @@ requested using gpio_request():
int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
/* change the polarity of a GPIO node in sysfs */
int gpio_sysfs_set_active_low(unsigned gpio, int value);
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
@@ -580,3 +589,9 @@ After the GPIO has been exported, gpio_export_link() allows creating
symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
use this to provide the interface under their own device in sysfs with
a descriptive name.
Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
differences between boards from user space. This only affects the
sysfs interface. Polarity change can be done both before and after
gpio_export(), and previously enabled poll(2) support for either
rising or falling edge will be reconfigured to follow this setting.
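As an illustrative sketch (the GPIO number and the board check are
made up, not part of any real driver), a driver exporting a button
line whose polarity differs between board revisions might do:

	gpio_request(button_gpio, "button");
	gpio_direction_input(button_gpio);
	gpio_export(button_gpio, false);
	if (board_has_inverted_button())	/* hypothetical helper */
		gpio_sysfs_set_active_low(button_gpio, 1);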

@@ -2729,6 +2729,11 @@ and is between 256 and 4096 characters. It is defined in the file
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
vt.cur_default= [VT] Default cursor shape.
Format: 0xCCBBAA, where AA, BB, and CC are the same as
the parameters of the <Esc>[?A;B;Cc escape sequence;
see VGA-softcursor.txt. Default: 2 = underline.
vt.default_blu= [VT]
Format: <blue0>,<blue1>,<blue2>,...,<blue15>
Change the default blue palette of the console.

@@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -101,7 +101,6 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -339,6 +339,15 @@ static struct davinci_mmc_config da850_mmc_config = {
.version = MMC_CTLR_VERSION_2,
};
static void da850_panel_power_ctrl(int val)
{
/* lcd backlight */
gpio_set_value(DA850_LCD_BL_PIN, val);
/* lcd power */
gpio_set_value(DA850_LCD_PWR_PIN, val);
}
static int da850_lcd_hw_init(void)
{
int status;
@@ -356,17 +365,11 @@ static int da850_lcd_hw_init(void)
gpio_direction_output(DA850_LCD_BL_PIN, 0);
gpio_direction_output(DA850_LCD_PWR_PIN, 0);
/* disable lcd backlight */
gpio_set_value(DA850_LCD_BL_PIN, 0);
/* Switch off panel power and backlight */
da850_panel_power_ctrl(0);
/* disable lcd power */
gpio_set_value(DA850_LCD_PWR_PIN, 0);
/* enable lcd power */
gpio_set_value(DA850_LCD_PWR_PIN, 1);
/* enable lcd backlight */
gpio_set_value(DA850_LCD_BL_PIN, 1);
/* Switch on panel power and backlight */
da850_panel_power_ctrl(1);
return 0;
}
@@ -674,6 +677,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl,
ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",

@@ -77,7 +77,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#endif
#define ELF_ARCH EM_AVR32
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -0,0 +1,28 @@
/*
* Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
*
* Copyright 2008-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#ifndef BFIN_LQ035Q1_H
#define BFIN_LQ035Q1_H
#define LQ035_RL (0 << 8) /* Right -> Left Scan */
#define LQ035_LR (1 << 8) /* Left -> Right Scan */
#define LQ035_TB (1 << 9) /* Top -> Bottom Scan */
#define LQ035_BT (0 << 9) /* Bottom -> Top Scan */
#define LQ035_BGR (1 << 11) /* Use BGR format */
#define LQ035_RGB (0 << 11) /* Use RGB format */
#define LQ035_NORM (1 << 13) /* Reversal */
#define LQ035_REV (0 << 13) /* Reversal */
struct bfin_lq035q1fb_disp_info {
unsigned mode;
/* GPIOs */
int use_bl;
unsigned gpio_bl;
};
#endif /* BFIN_LQ035Q1_H */

@@ -55,7 +55,6 @@ do { \
_regs->p2 = _dynamic_addr; \
} while(0)
#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
#define ELF_EXEC_PAGESIZE 4096

@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -115,7 +115,6 @@ do { \
__kernel_frame0_ptr->gr29 = 0; \
} while(0)
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
#define ELF_EXEC_PAGESIZE 16384

@@ -34,7 +34,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLAT_INIT(_r) _r->er1 = 0
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -11,8 +11,6 @@
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#define USE_ELF_CORE_DUMP 1
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;

@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
return addr + size <= *dev->dma_mask;
return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)

@@ -25,7 +25,6 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are

@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
int i, ps, ps_shift, entry, entries, mapsize, last_entry;
int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
entry = find_first_zero_bit(map, mapsize);
while (entry < mapsize) {
last_entry = find_next_bit(map, mapsize, entry);
if (last_entry - entry >= entries)
break;
entry = find_next_zero_bit(map, mapsize, last_entry);
}
if (entry > mapsize) {
entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
for (i = 0; i < entries; i++)
set_bit(entry + i, map);
bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

@@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*

@@ -59,7 +59,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else

@@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2MSB
#endif
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

@@ -326,7 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what

@@ -77,7 +77,6 @@ do { \
_ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
} while (0)
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096

@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
return addr + size <= *dev->dma_mask;
return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)

@@ -170,7 +170,6 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE

@@ -140,6 +140,8 @@ extern void user_enable_single_step(struct task_struct *);
extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#define ARCH_HAS_USER_SINGLE_STEP_INFO
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
@@ -251,7 +251,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
iommu_area_free(tbl->it_map, free_entry, npages);
bitmap_clear(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,

@@ -174,6 +174,15 @@ int die(const char *str, struct pt_regs *regs, long err)
return 0;
}
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs, siginfo_t *info)
{
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = TRAP_TRACE;
info->si_addr = (void __user *)regs->nip;
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;

@@ -155,7 +155,6 @@ extern unsigned int vdso_enabled;
} while (0)
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -61,7 +61,6 @@ struct task_struct;
struct pt_regs;
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what

@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
*/
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE

@@ -104,8 +104,6 @@ typedef struct {
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

@@ -152,7 +152,6 @@ typedef struct {
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -169,7 +170,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
iommu_area_free(arena->map, entry, npages);
bitmap_clear(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,

@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -1875,7 +1876,7 @@ EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long n, i, start, end, limit;
unsigned long n, start, end, limit;
int pass;
limit = arena->limit;
@@ -1883,7 +1884,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
pass = 0;
again:
n = find_next_zero_bit(arena->map, limit, start);
n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
@@ -1896,16 +1897,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
return -1;
}
}
for (i = n; i < end; i++) {
if (test_bit(i, arena->map)) {
start = i + 1;
goto again;
}
}
for (i = n; i < end; i++)
__set_bit(i, arena->map);
bitmap_set(arena->map, n, npages);
arena->hint = end;

@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <asm/sections.h>
#include <asm/page.h>
@@ -1021,20 +1022,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
scan = 0;
local_irq_save(flags);
for (;;) {
scan = find_next_zero_bit(sun4c_iobuffer_map,
iobuffer_map_size, scan);
if ((base = scan) + npages > iobuffer_map_size) goto abend;
for (;;) {
if (scan >= base + npages) goto found;
if (test_bit(scan, sun4c_iobuffer_map)) break;
scan++;
}
}
base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
0, npages, 0);
if (base >= iobuffer_map_size)
goto abend;
found:
high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
high = SUN4C_REAL_PGDIR_ALIGN(high);
while (high > sun4c_iobuffer_high) {

@@ -48,7 +48,6 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EAX(regs) = 0; \
} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)

@@ -17,8 +17,6 @@ extern long elf_aux_hwcap;
#define ELF_CLASS ELFCLASS32
#endif
#define USE_ELF_CORE_DUMP
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2

@@ -104,7 +104,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
clear_thread_flag(TIF_IA32);
#endif
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)

@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
return addr + size <= *dev->dma_mask;
return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)

@@ -239,7 +239,6 @@ extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical

@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
#define ARCH_HAS_USER_SINGLE_STEP_INFO
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);

@@ -76,15 +76,6 @@ union partition_info_u {
};
};
union uv_watchlist_u {
u64 val;
struct {
u64 blade : 16,
size : 32,
filler : 16;
};
};
enum uv_memprotect {
UV_MEMPROT_RESTRICT_ACCESS,
UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);

@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
return uv_soc_phys_ram_to_gpa(__pa(v));
}
/* Top two bits indicate the requested address is in MMR space. */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
return (gpa >> 62) == 0x3UL;
}
/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
unsigned long paddr = gpa & uv_hub_info->gpa_mask;
unsigned long remap_base = uv_hub_info->lowmem_remap_base;
unsigned long remap_top = uv_hub_info->lowmem_remap_top;
if (paddr >= remap_base && paddr < remap_base + remap_top)
paddr -= remap_base;
return paddr;
}
/* gnode -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
@@ -307,6 +329,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
return readq(uv_global_mmr64_address(pnode, offset));
}
/*
* Global MMR space addresses when referenced by the GRU. (GRU does
* NOT use socket addressing).
*/
static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
{
return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
}
/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
return (1UL << UVH_IPI_INT_SEND_SHFT) |
((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
}
static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
if (vector == NMI_VECTOR)
dmode = dest_NMI;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
val = uv_hub_ipi_value(apicid, vector, dmode);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

@@ -19,7 +19,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
@@ -1162,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
iommu_area_free(range->bitmap, address, pages);
bitmap_clear(range->bitmap, address, pages);
}

@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
}
int
uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
unsigned long *intr_mmr_offset)
{
union uv_watchlist_u size_blade;
u64 watchlist;
s64 ret;
size_blade.size = mq_size;
size_blade.blade = blade;
/*
* bios returns watchlist number or negative error number.
*/
ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
size_blade.val, (u64)intr_mmr_offset,
mq_size, (u64)intr_mmr_offset,
(u64)&watchlist, 0);
if (ret < BIOS_STATUS_SUCCESS)
return ret;

@@ -31,7 +31,7 @@
#include <linux/string.h>
#include <linux/crash_dump.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_lock_irqsave(&tbl->it_lock, flags);
iommu_area_reserve(tbl->it_map, index, npages);
bitmap_set(tbl->it_map, index, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_lock_irqsave(&tbl->it_lock, flags);
iommu_area_free(tbl->it_map, entry, npages);
bitmap_clear(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}

@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
iommu_area_free(iommu_gart_bitmap, offset, size);
bitmap_clear(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -792,7 +792,7 @@ int __init gart_iommu_init(void)
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);

@@ -1676,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
static void fill_sigtrap_info(struct task_struct *tsk,
struct pt_regs *regs,
int error_code, int si_code,
struct siginfo *info)
{
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs,
struct siginfo *info)
{
fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code)
{
struct siginfo info;
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
memset(&info, 0, sizeof(info));
info.si_signo = SIGTRAP;
info.si_code = si_code;
/* User-mode ip? */
info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
@@ -1755,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
bool step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
* syscall_trace_enter(), so don't do any more now.
* syscall_trace_enter().
*/
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
return;
/*
* If we are single-stepping, synthesize a trap to follow the
* system call instruction.
*/
if (test_thread_flag(TIF_SINGLESTEP) &&
tracehook_consider_fatal_signal(current, SIGTRAP))
send_sigtrap(current, regs, 0, TRAP_BRKPT);
step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
!test_thread_flag(TIF_SYSCALL_EMU);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}

@@ -123,7 +123,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*

@@ -285,6 +285,7 @@ static const struct file_operations efi_rtc_fops = {
.unlocked_ioctl = efi_rtc_ioctl,
.open = efi_rtc_open,
.release = efi_rtc_close,
.llseek = no_llseek,
};
static struct miscdevice efi_rtc_dev= {

@@ -370,7 +370,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_IDLE;
case KCS_START_OP:
if (state != KCS_IDLE) {
if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;

@@ -339,7 +339,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
}
static DECLARE_WORK(moom_work, moom_callback);

@@ -164,6 +164,9 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
static int cur_default = CUR_DEFAULT;
module_param(cur_default, int, S_IRUGO | S_IWUSR);
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
@@ -1636,7 +1639,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
vc->vc_cursor_type = CUR_DEFAULT;
vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
@@ -1838,7 +1841,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
vc->vc_cursor_type = CUR_DEFAULT;
vc->vc_cursor_type = cur_default;
return;
}
break;

@@ -9,6 +9,11 @@
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
* The Intel 5100 has two independent channels. The EDAC core currently
* cannot reflect this configuration, so instead the chip-select
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -25,6 +30,8 @@
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
#define I5100_MC_SCRBEN_MASK (1 << 7)
#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -72,11 +79,21 @@
/* bit field accessors */
static inline u32 i5100_mc_scrben(u32 mc)
{
return mc >> 7 & 1;
}
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
static inline u32 i5100_mc_scrbdone(u32 mc)
{
return mc >> 4 & 1;
}
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
@@ -265,42 +282,43 @@ static inline u32 i5100_recmemb_ras(u32 a)
}
/* some generic limits */
#define I5100_MAX_RANKS_PER_CTLR 6
#define I5100_MAX_CTLRS 2
#define I5100_MAX_RANKS_PER_CHAN 6
#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
* controller, the mapping is fairly obvious but not unique.
* we map -1 -> NC and assume both controllers use the same
* channel, the mapping is fairly obvious but not unique.
* we map -1 -> NC and assume both channels use the same
* map...
*
*/
int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
} mir[I5100_MAX_CTLRS];
} mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
unsigned amir[I5100_MAX_CTLRS];
unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
} dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
} dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
@@ -310,30 +328,33 @@ struct i5100_priv {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
} mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
} mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
unsigned ranksperctlr; /* number of ranks per controller */
unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
struct delayed_work i5100_scrubbing;
int scrub_enable;
};
/* map a rank/ctlr to a slot number on the mainboard */
/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
int ctlr, int rank)
int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
const int numrank = priv->dimm_numrank[ctlr][i];
const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
return i * 2 + ctlr;
return i * 2 + chan;
}
return -1;
@@ -374,32 +395,32 @@ static const char *i5100_err_msg(unsigned err)
return "none";
}
/* convert csrow index into a rank (per controller -- 0..5) */
/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return csrow % priv->ranksperctlr;
return csrow % priv->ranksperchan;
}
/* convert csrow index into a controller (0..1) */
static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
/* convert csrow index into a channel (0..1) */
static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return csrow / priv->ranksperctlr;
return csrow / priv->ranksperchan;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
int ctlr, int rank)
int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
return ctlr * priv->ranksperctlr + rank;
return chan * priv->ranksperchan + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
int ctlr,
int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@ -407,12 +428,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
"CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
"CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
ctlr, bank, rank, syndrome, cas, ras,
chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
@ -421,7 +442,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
int ctlr,
int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@ -429,23 +450,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
"UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
"UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
ctlr, bank, rank, syndrome, cas, ras,
chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
@ -484,7 +505,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
@ -506,7 +527,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@ -534,6 +555,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
}
}
/* The i5100 chipset will scrub the entire memory once, then
* set a done bit. Continuous scrubbing is achieved by enqueuing
* delayed work to a workqueue, checking every few minutes whether
* the scrubbing has completed and, if so, reinitiating it.
*/
static void i5100_refresh_scrubbing(struct work_struct *work)
{
struct delayed_work *i5100_scrubbing = container_of(work,
struct delayed_work,
work);
struct i5100_priv *priv = container_of(i5100_scrubbing,
struct i5100_priv,
i5100_scrubbing);
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (priv->scrub_enable) {
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (i5100_mc_scrbdone(dw)) {
dw |= I5100_MC_SCRBEN_MASK;
pci_write_config_dword(priv->mc, I5100_MC, dw);
pci_read_config_dword(priv->mc, I5100_MC, &dw);
}
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
}
}
/*
* The bandwidth is based on experimentation; feel free to refine it.
*/
static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
u32 *bandwidth)
{
struct i5100_priv *priv = mci->pvt_info;
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
if (*bandwidth) {
priv->scrub_enable = 1;
dw |= I5100_MC_SCRBEN_MASK;
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
} else {
priv->scrub_enable = 0;
dw &= ~I5100_MC_SCRBEN_MASK;
cancel_delayed_work(&(priv->i5100_scrubbing));
}
pci_write_config_dword(priv->mc, I5100_MC, dw);
pci_read_config_dword(priv->mc, I5100_MC, &dw);
*bandwidth = 5900000 * i5100_mc_scrben(dw);
return 0;
}
static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
u32 *bandwidth)
{
struct i5100_priv *priv = mci->pvt_info;
u32 dw;
pci_read_config_dword(priv->mc, I5100_MC, &dw);
*bandwidth = 5900000 * i5100_mc_scrben(dw);
return 0;
}
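To see how these callbacks surface to the administrator, here is a hedged user-space sketch; it assumes the usual EDAC sysfs layout with a single memory controller registered as mc0, and that the rate file reports the bandwidth value computed above (0 meaning scrubbing is off).

#include <stdio.h>

/* Hedged sketch: exercise the scrub-rate file backed by
 * i5100_set_scrub_rate()/i5100_get_scrub_rate().  The sysfs path
 * assumes a single controller registered as mc0. */
int main(void)
{
	FILE *f = fopen("/sys/devices/system/edac/mc/mc0/sdram_scrub_rate", "r+");
	char buf[32];

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("current rate: %s", buf);	/* 0 means scrubbing off */
	rewind(f);
	fputs("5900000", f);	/* any non-zero value enables scrubbing */
	fclose(f);
	return 0;
}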
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
@ -557,19 +652,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
const unsigned chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
if (!priv->mtr[ctlr][ctlr_rank].present)
if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
priv->mtr[ctlr][ctlr_rank].numcol +
priv->mtr[ctlr][ctlr_rank].numrow +
priv->mtr[ctlr][ctlr_rank].numbank;
priv->mtr[chan][chan_rank].numcol +
priv->mtr[chan][chan_rank].numrow +
priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@ -581,11 +676,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
for (i = 0; i < I5100_MAX_CTLRS; i++) {
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
@ -644,7 +739,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* fill dimm chip select map
*
* FIXME:
* o only valid for 4 ranks per controller
* o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
@ -653,9 +747,7 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
struct i5100_priv *priv = mci->pvt_info;
int i;
WARN_ON(priv->ranksperctlr != 4);
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@ -663,12 +755,21 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
/* only 2 chip selects per slot... */
priv->dimm_csmap[0][0] = 0;
priv->dimm_csmap[0][1] = 3;
priv->dimm_csmap[1][0] = 1;
priv->dimm_csmap[1][1] = 2;
priv->dimm_csmap[2][0] = 2;
priv->dimm_csmap[3][0] = 3;
if (priv->ranksperchan == 4) {
priv->dimm_csmap[0][0] = 0;
priv->dimm_csmap[0][1] = 3;
priv->dimm_csmap[1][0] = 1;
priv->dimm_csmap[1][1] = 2;
priv->dimm_csmap[2][0] = 2;
priv->dimm_csmap[3][0] = 3;
} else {
priv->dimm_csmap[0][0] = 0;
priv->dimm_csmap[0][1] = 1;
priv->dimm_csmap[1][0] = 2;
priv->dimm_csmap[1][1] = 3;
priv->dimm_csmap[2][0] = 4;
priv->dimm_csmap[2][1] = 5;
}
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
@ -677,10 +778,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
for (i = 0; i < I5100_MAX_CTLRS; i++) {
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@ -720,7 +821,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
for (i = 0; i < I5100_MAX_CTLRS; i++) {
for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
@ -747,7 +848,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
@ -765,7 +866,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
(priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
(priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
@ -777,7 +878,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
"DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
"DIMM%u", i5100_rank_to_slot(mci, chan, rank));
total_pages += npages;
}
@ -815,13 +916,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
if (ranksperch != 4) {
/* FIXME: get 6 ranks / controller to work - need hw... */
printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
ret = -ENODEV;
goto bail_pdev;
}
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@ -864,11 +958,21 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->dev = &pdev->dev;
priv = mci->pvt_info;
priv->ranksperctlr = ranksperch;
priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
/* If scrubbing was already enabled by the BIOS, start maintaining it */
pci_read_config_dword(pdev, I5100_MC, &dw);
if (i5100_mc_scrben(dw)) {
priv->scrub_enable = 1;
schedule_delayed_work(&(priv->i5100_scrubbing),
I5100_SCRUB_REFRESH_RATE);
}
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
@ -882,6 +986,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
@ -897,12 +1003,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
goto bail_mc;
goto bail_scrub;
}
return ret;
bail_mc:
bail_scrub:
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_ch1:
@ -935,6 +1043,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
return;
priv = mci->pvt_info;
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);

View file

@ -206,6 +206,12 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Moorestown platform GPIO.
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
---help---
Add support for the GPIO IP in the timberdale FPGA.
comment "SPI GPIO expanders:"
config GPIO_MAX7301

View file

@ -13,6 +13,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o

View file

@ -53,6 +53,7 @@ struct gpio_desc {
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
#define PDESC_ID_SHIFT 16 /* add new flags before this one */
@ -210,6 +211,11 @@ static DEFINE_MUTEX(sysfs_lock);
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
* /active_low
* * configures polarity of /value
* * is read/write as zero/nonzero
* * also affects existing and subsequent "falling" and "rising"
* /edge configuration
*/
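As an illustration of the attribute set documented above, a hedged user-space sketch; the GPIO number 23 is hypothetical and the pin is assumed to be exported already:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical example: invert the logical sense of gpio23 via the new
 * active_low attribute, then read back the (now inverted) value. */
int main(void)
{
	char buf[4];
	int fd;

	fd = open("/sys/class/gpio/gpio23/active_low", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)	/* 1 = active low, 0 = active high */
		return 1;
	close(fd);

	fd = open("/sys/class/gpio/gpio23/value", O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, buf, 1) == 1)
		printf("logical value: %c\n", buf[0]);	/* inverted vs. the wire */
	close(fd);
	return 0;
}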
static ssize_t gpio_direction_show(struct device *dev,
@ -255,7 +261,7 @@ static ssize_t gpio_direction_store(struct device *dev,
return status ? : size;
}
static const DEVICE_ATTR(direction, 0644,
static /* const */ DEVICE_ATTR(direction, 0644,
gpio_direction_show, gpio_direction_store);
static ssize_t gpio_value_show(struct device *dev,
@ -267,10 +273,17 @@ static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
if (!test_bit(FLAG_EXPORT, &desc->flags))
if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
else
status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
} else {
int value;
value = !!gpio_get_value_cansleep(gpio);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
status = sprintf(buf, "%d\n", value);
}
mutex_unlock(&sysfs_lock);
return status;
@ -294,6 +307,8 @@ static ssize_t gpio_value_store(struct device *dev,
status = strict_strtol(buf, 0, &value);
if (status == 0) {
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
gpio_set_value_cansleep(gpio, value != 0);
status = size;
}
@ -303,7 +318,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
static /*const*/ DEVICE_ATTR(value, 0644,
static const DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@ -352,9 +367,11 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
irq_flags = IRQF_SHARED;
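/* With FLAG_ACTIVE_LOW set, the logical edge requested via sysfs is
 * the opposite of the electrical edge, hence the swapped trigger
 * flags below. */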
if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
irq_flags |= IRQF_TRIGGER_FALLING;
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
irq_flags |= IRQF_TRIGGER_RISING;
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!pdesc) {
pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
@ -475,9 +492,79 @@ static ssize_t gpio_edge_store(struct device *dev,
static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
int value)
{
int status = 0;
if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
if (value)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
else
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
/* reconfigure poll(2) support if enabled on one edge only */
if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
!!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
gpio_setup_irq(desc, dev, 0);
status = gpio_setup_irq(desc, dev, trigger_flags);
}
return status;
}
static ssize_t gpio_active_low_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
if (!test_bit(FLAG_EXPORT, &desc->flags))
status = -EIO;
else
status = sprintf(buf, "%d\n",
!!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
mutex_unlock(&sysfs_lock);
return status;
}
static ssize_t gpio_active_low_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
} else {
long value;
status = strict_strtol(buf, 0, &value);
if (status == 0)
status = sysfs_set_active_low(desc, dev, value != 0);
}
mutex_unlock(&sysfs_lock);
return status ? : size;
}
static const DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);
static const struct attribute *gpio_attrs[] = {
&dev_attr_direction.attr,
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
@ -662,12 +749,12 @@ int gpio_export(unsigned gpio, bool direction_may_change)
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
desc, ioname ? ioname : "gpio%d", gpio);
if (!IS_ERR(dev)) {
if (direction_may_change)
status = sysfs_create_group(&dev->kobj,
status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
else
if (!status && direction_may_change)
status = device_create_file(dev,
&dev_attr_value);
&dev_attr_direction);
if (!status && gpio_to_irq(gpio) >= 0
&& (direction_may_change
@ -744,6 +831,55 @@ int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
}
EXPORT_SYMBOL_GPL(gpio_export_link);
/**
* gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
* @gpio: gpio to change
* @value: non-zero to use active low, i.e. inverted values
*
* Set the polarity of the /sys/class/gpio/gpioN/value sysfs attribute.
* The GPIO does not have to be exported yet. If poll(2) support has
* been enabled for either rising or falling edge, it will be
* reconfigured to follow the new polarity.
*
* Returns zero on success, else an error.
*/
int gpio_sysfs_set_active_low(unsigned gpio, int value)
{
struct gpio_desc *desc;
struct device *dev = NULL;
int status = -EINVAL;
if (!gpio_is_valid(gpio))
goto done;
mutex_lock(&sysfs_lock);
desc = &gpio_desc[gpio];
if (test_bit(FLAG_EXPORT, &desc->flags)) {
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev == NULL) {
status = -ENODEV;
goto unlock;
}
}
status = sysfs_set_active_low(desc, dev, value);
unlock:
mutex_unlock(&sysfs_lock);
done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
return status;
}
EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
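For reference, a hedged board-setup sketch combining the new helper with the existing gpiolib calls; the GPIO number 42 and the "status" label are invented:

#include <linux/gpio.h>

/* Hypothetical board-setup sketch: export a fixed-direction,
 * active-low input so user space reads a logical 1 when the
 * (inverted) signal is asserted. */
static int __init board_export_status_gpio(void)
{
	int err;

	err = gpio_request(42, "status");	/* 42 is a made-up pin */
	if (err)
		return err;
	err = gpio_direction_input(42);
	if (!err)
		err = gpio_export(42, false);	/* direction may not change */
	if (!err)
		err = gpio_sysfs_set_active_low(42, 1);
	if (err)
		gpio_free(42);
	return err;
}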
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
@ -1094,6 +1230,7 @@ void gpio_free(unsigned gpio)
}
desc_set_label(desc, NULL);
module_put(desc->chip->owner);
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
} else
WARN_ON(extra_checks);

View file

@ -123,7 +123,7 @@ static int lnw_irq_type(unsigned irq, unsigned type)
void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
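/* gpio is unsigned, so the old '< 0' test could never fire, and
 * 'ngpio' itself is one past the last valid offset; '>=' fixes both. */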
if (gpio < 0 || gpio > lnw->chip.ngpio)
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)

drivers/gpio/timbgpio.c Normal file
View file

@ -0,0 +1,342 @@
/*
* timbgpio.c timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA GPIO
*/
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
#include <linux/interrupt.h>
#define DRIVER_NAME "timb-gpio"
#define TGPIOVAL 0x00
#define TGPIODIR 0x04
#define TGPIO_IER 0x08
#define TGPIO_ISR 0x0c
#define TGPIO_IPR 0x10
#define TGPIO_ICR 0x14
#define TGPIO_FLR 0x18
#define TGPIO_LVR 0x1c
struct timbgpio {
void __iomem *membase;
spinlock_t lock; /* mutual exclusion */
struct gpio_chip gpio;
int irq_base;
};
static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
unsigned offset, bool enabled)
{
struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
u32 reg;
spin_lock(&tgpio->lock);
reg = ioread32(tgpio->membase + offset);
if (enabled)
reg |= (1 << index);
else
reg &= ~(1 << index);
iowrite32(reg, tgpio->membase + offset);
spin_unlock(&tgpio->lock);
return 0;
}
static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
}
static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
u32 value;
value = ioread32(tgpio->membase + TGPIOVAL);
return (value & (1 << nr)) ? 1 : 0;
}
static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
}
static void timbgpio_gpio_set(struct gpio_chip *gpio,
unsigned nr, int val)
{
timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
}
static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
{
struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
if (tgpio->irq_base <= 0)
return -EINVAL;
return tgpio->irq_base + offset;
}
/*
* GPIO IRQ
*/
static void timbgpio_irq_disable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
}
static void timbgpio_irq_enable(unsigned irq)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
}
static int timbgpio_irq_type(unsigned irq, unsigned trigger)
{
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
unsigned long flags;
u32 lvr, flr;
if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;
spin_lock_irqsave(&tgpio->lock, flags);
lvr = ioread32(tgpio->membase + TGPIO_LVR);
flr = ioread32(tgpio->membase + TGPIO_FLR);
	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		flr &= ~(1 << offset);
		if (trigger & IRQ_TYPE_LEVEL_HIGH)
			lvr |= 1 << offset;
		else
			lvr &= ~(1 << offset);
	} else if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		/* the hardware cannot trigger on both edges at once */
		spin_unlock_irqrestore(&tgpio->lock, flags);
		return -EINVAL;
	} else {
		flr |= 1 << offset;
		/* opposite of what the datasheet says, but it matches
		 * the observed hardware behaviour
		 */
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			lvr |= 1 << offset;
		else
			lvr &= ~(1 << offset);
	}
iowrite32(lvr, tgpio->membase + TGPIO_LVR);
iowrite32(flr, tgpio->membase + TGPIO_FLR);
iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
spin_unlock_irqrestore(&tgpio->lock, flags);
return 0;
}
static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
{
struct timbgpio *tgpio = get_irq_data(irq);
unsigned long ipr;
int offset;
desc->chip->ack(irq);
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
}
static struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
.enable = timbgpio_irq_enable,
.disable = timbgpio_irq_disable,
.set_type = timbgpio_irq_type,
};
static int __devinit timbgpio_probe(struct platform_device *pdev)
{
int err, i;
struct gpio_chip *gc;
struct timbgpio *tgpio;
struct resource *iomem;
struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
int irq = platform_get_irq(pdev, 0);
if (!pdata || pdata->nr_pins > 32) {
err = -EINVAL;
goto err_mem;
}
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
err = -EINVAL;
goto err_mem;
}
tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
if (!tgpio) {
err = -ENOMEM;
goto err_mem;
}
tgpio->irq_base = pdata->irq_base;
spin_lock_init(&tgpio->lock);
if (!request_mem_region(iomem->start, resource_size(iomem),
DRIVER_NAME)) {
err = -EBUSY;
goto err_request;
}
tgpio->membase = ioremap(iomem->start, resource_size(iomem));
if (!tgpio->membase) {
err = -ENOMEM;
goto err_ioremap;
}
gc = &tgpio->gpio;
gc->label = dev_name(&pdev->dev);
gc->owner = THIS_MODULE;
gc->dev = &pdev->dev;
gc->direction_input = timbgpio_gpio_direction_input;
gc->get = timbgpio_gpio_get;
gc->direction_output = timbgpio_gpio_direction_output;
gc->set = timbgpio_gpio_set;
gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
gc->ngpio = pdata->nr_pins;
gc->can_sleep = 0;
err = gpiochip_add(gc);
if (err)
goto err_chipadd;
platform_set_drvdata(pdev, tgpio);
/* make sure to disable interrupts */
iowrite32(0x0, tgpio->membase + TGPIO_IER);
if (irq < 0 || tgpio->irq_base <= 0)
return 0;
for (i = 0; i < pdata->nr_pins; i++) {
set_irq_chip_and_handler_name(tgpio->irq_base + i,
&timbgpio_irqchip, handle_simple_irq, "mux");
set_irq_chip_data(tgpio->irq_base + i, tgpio);
#ifdef CONFIG_ARM
set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
#endif
}
set_irq_data(irq, tgpio);
set_irq_chained_handler(irq, timbgpio_irq);
return 0;
err_chipadd:
iounmap(tgpio->membase);
err_ioremap:
release_mem_region(iomem->start, resource_size(iomem));
err_request:
kfree(tgpio);
err_mem:
printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
return err;
}
static int __devexit timbgpio_remove(struct platform_device *pdev)
{
int err;
struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
struct timbgpio *tgpio = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
if (irq >= 0 && tgpio->irq_base > 0) {
int i;
for (i = 0; i < pdata->nr_pins; i++) {
set_irq_chip(tgpio->irq_base + i, NULL);
set_irq_chip_data(tgpio->irq_base + i, NULL);
}
set_irq_handler(irq, NULL);
set_irq_data(irq, NULL);
}
err = gpiochip_remove(&tgpio->gpio);
if (err)
printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
iounmap(tgpio->membase);
release_mem_region(iomem->start, resource_size(iomem));
kfree(tgpio);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver timbgpio_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = timbgpio_probe,
.remove = timbgpio_remove,
};
/*--------------------------------------------------------------------------*/
static int __init timbgpio_init(void)
{
return platform_driver_register(&timbgpio_platform_driver);
}
static void __exit timbgpio_exit(void)
{
platform_driver_unregister(&timbgpio_platform_driver);
}
module_init(timbgpio_init);
module_exit(timbgpio_exit);
MODULE_DESCRIPTION("Timberdale GPIO driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mocean Laboratories");
MODULE_ALIAS("platform:"DRIVER_NAME);

View file

@ -53,6 +53,17 @@ struct gru_chiplet_info {
int free_user_cbr;
};
/*
* Statistics kept for each context.
*/
struct gru_gseg_statistics {
unsigned long fmm_tlbmiss;
unsigned long upm_tlbmiss;
unsigned long tlbdropin;
unsigned long context_stolen;
unsigned long reserved[10];
};
/* Flags for GRU options on the gru_create_context() call */
/* Select one of the follow 4 options to specify how TLB misses are handled */
#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */

View file

@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb);
#include <asm/intrinsics.h>
#define __flush_cache(p) ia64_fc((unsigned long)p)
/* Use volatile on IA64 to ensure ordering via st8.rel */
#define gru_ordered_store_int(p, v) \
#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
*((volatile int *)(p)) = v; /* force st.rel */ \
*((volatile unsigned long *)(p)) = v; /* force st.rel */ \
} while (0)
#elif defined(CONFIG_X86_64)
#define __flush_cache(p) clflush(p)
#define gru_ordered_store_int(p, v) \
#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
*(int *)p = v; \
*(unsigned long *)p = v; \
} while (0)
#else
#error "Unsupported architecture"
@ -129,8 +129,13 @@ struct gru_instruction_bits {
*/
struct gru_instruction {
/* DW 0 */
unsigned int op32; /* icmd,xtype,iaa0,ima,opc */
unsigned int tri0;
union {
unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
struct {
unsigned int op32;
unsigned int tri0;
};
};
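	/* The union above lets gru_start_instruction() set icmd, tri0 and
	 * the CBS_ACTIVE status with a single 64-bit store, making the
	 * 'start' of the instruction visible atomically. */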
unsigned long tri1_bufsize; /* DW 1 */
unsigned long baddr0; /* DW 2 */
unsigned long nelem; /* DW 3 */
@ -140,7 +145,7 @@ struct gru_instruction {
unsigned long avalue; /* DW 7 */
};
/* Some shifts and masks for the low 32 bits of a GRU command */
/* Some shifts and masks for the low 64 bits of a GRU command */
#define GRU_CB_ICMD_SHFT 0
#define GRU_CB_ICMD_MASK 0x1
#define GRU_CB_XTYPE_SHFT 8
@ -155,6 +160,10 @@ struct gru_instruction {
#define GRU_CB_OPC_MASK 0xff
#define GRU_CB_EXOPC_SHFT 24
#define GRU_CB_EXOPC_MASK 0xff
#define GRU_IDEF2_SHFT 32
#define GRU_IDEF2_MASK 0x3ffff
#define GRU_ISTATUS_SHFT 56
#define GRU_ISTATUS_MASK 0x3
/* GRU instruction opcodes (opc field) */
#define OP_NOP 0x00
@ -256,6 +265,7 @@ struct gru_instruction {
#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
#define CBE_CAUSE_FORCED_ERROR (1 << 19)
/* CBE cbrexecstatus bits */
#define CBR_EXS_ABORT_OCC_BIT 0
@ -264,13 +274,15 @@ struct gru_instruction {
#define CBR_EXS_QUEUED_BIT 3
#define CBR_EXS_TLB_INVAL_BIT 4
#define CBR_EXS_EXCEPTION_BIT 5
#define CBR_EXS_CB_INT_PENDING_BIT 6
#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
/*
* Exceptions are retried for the following cases. If any OTHER bits are set
@ -296,12 +308,14 @@ union gru_mesqhead {
/* Generate the low word of a GRU instruction */
static inline unsigned int
__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
static inline unsigned long
__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
unsigned char iaa0, unsigned char iaa1,
unsigned char ima)
unsigned long idef2, unsigned char ima)
{
return (1 << GRU_CB_ICMD_SHFT) |
((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
(idef2 << GRU_IDEF2_SHFT) |
(iaa0 << GRU_CB_IAA0_SHFT) |
(iaa1 << GRU_CB_IAA1_SHFT) |
(ima << GRU_CB_IMA_SHFT) |
@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p)
}
/*
* Store the lower 32 bits of the command including the "start" bit. Then
* Store the lower 64 bits of the command including the "start" bit. Then
* start the instruction executing.
*/
static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
{
gru_ordered_store_int(ins, op32);
gru_ordered_store_ulong(ins, op64);
mb();
gru_flush_cache(ins);
}
@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
* - nelem and stride are in elements
* - tri0/tri1 is in bytes for the beginning of the data segment.
*/
static inline void gru_vload_phys(void *cb, unsigned long gpa,
unsigned int tri0, int iaa, unsigned long hints)
{
struct gru_instruction *ins = (struct gru_instruction *)cb;
ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
ins->nelem = 1;
ins->op1_stride = 1;
gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
(unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vstore_phys(void *cb, unsigned long gpa,
unsigned int tri0, int iaa, unsigned long hints)
{
struct gru_instruction *ins = (struct gru_instruction *)cb;
ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
ins->nelem = 1;
ins->op1_stride = 1;
gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
(unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vload(void *cb, unsigned long mem_addr,
unsigned int tri0, unsigned char xtype, unsigned long nelem,
unsigned long stride, unsigned long hints)
@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
ins->tri0 = tri0;
ins->op1_stride = stride;
gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
(unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vstore(void *cb, unsigned long mem_addr,
@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
ins->tri0 = tri0;
ins->op1_stride = stride;
gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
tri0, CB_IMA(hints)));
}
static inline void gru_ivload(void *cb, unsigned long mem_addr,
@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
tri0, CB_IMA(hints)));
}
static inline void gru_ivstore(void *cb, unsigned long mem_addr,
@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
tri0, CB_IMA(hints)));
}
static inline void gru_vset(void *cb, unsigned long mem_addr,
@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->op1_stride = stride;
gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_ivset(void *cb, unsigned long mem_addr,
@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->tri1_bufsize = tri1;
gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_vflush(void *cb, unsigned long mem_addr,
@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->op1_stride = stride;
ins->nelem = nelem;
gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_nop(void *cb, int hints)
{
struct gru_instruction *ins = (void *)cb;
gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
}
@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
ins->tri0 = tri0;
ins->tri1_bufsize = bufsize;
gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM,
IAA_RAM, CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
IAA_RAM, tri0, CB_IMA(hints)));
}
static inline void gru_bstore(void *cb, const unsigned long src,
@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
ins->tri0 = tri0;
gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
tri0, CB_IMA(hints)));
}
static inline void gru_gamir(void *cb, int exopc, unsigned long src,
@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_gamer(void *cb, int exopc, unsigned long src,
@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
0, CB_IMA(hints)));
}
static inline void gru_gamxr(void *cb, unsigned long src,
@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src,
ins->baddr0 = (long)src;
ins->nelem = 4;
gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
IAA_RAM, 0, CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
IAA_RAM, 0, 0, CB_IMA(hints)));
}
static inline void gru_mesq(void *cb, unsigned long queue,
@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue,
ins->baddr0 = (long)queue;
ins->nelem = nelem;
ins->tri0 = tri0;
gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
CB_IMA(hints)));
gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
tri0, CB_IMA(hints)));
}
static inline unsigned long gru_get_amo_value(void *cb)
@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb)
gru_wait_abort_proc(cb);
}
/*
* Get a pointer to the start of a gseg
* p - Any valid pointer within the gseg
*/
static inline void *gru_get_gseg_pointer(void *p)
{
return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
}
/*
* Get a pointer to a control block

View file

@ -40,6 +40,12 @@
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/* Return codes for vtop functions */
#define VTOP_SUCCESS 0
#define VTOP_INVALID -1
#define VTOP_RETRY -2
/*
* Test if a physical address is a valid GRU GSEG address
*/
@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
down_write(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
if (gts) {
mutex_lock(&gts->ts_ctxlock);
downgrade_write(&mm->mmap_sem);
} else {
up_write(&mm->mmap_sem);
}
if (!vma)
goto err;
gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
if (IS_ERR(gts))
goto err;
mutex_lock(&gts->ts_ctxlock);
downgrade_write(&mm->mmap_sem);
return gts;
err:
up_write(&mm->mmap_sem);
return gts;
}
@ -122,38 +131,14 @@ static void gru_unlock_gts(struct gru_thread_state *gts)
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
*
* If the cb address is not valid (should not happen, but...), nothing
* bad will happen.. The get_user()/put_user() will fail but there
* are no bad side-effects.
*/
static void gru_cb_set_istatus_active(unsigned long __user *cb)
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
union {
struct gru_instruction_bits bits;
unsigned long dw;
} u;
if (cb) {
get_user(u.dw, cb);
u.bits.istatus = CBS_ACTIVE;
put_user(u.dw, cb);
if (cbk) {
cbk->istatus = CBS_ACTIVE;
}
}
/*
* Convert an interrupt IRQ to a pointer to the GRU GTS that caused the
* interrupt. Interrupts are always sent to a cpu on the blade that contains the
* GRU (except for headless blades which are not currently supported). A blade
* has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
* number uniquely identifies the GRU chiplet on the local blade that caused the
* interrupt. Always called in interrupt context.
*/
static inline struct gru_state *irq_to_gru(int irq)
{
return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}
/*
* Read & clear a TFM
*
@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
{
struct page *page;
/* ZZZ Need to handle HUGE pages */
if (is_vm_hugetlb_page(vma))
return -EFAULT;
#ifdef CONFIG_HUGETLB_PAGE
*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
*pageshift = PAGE_SHIFT;
#endif
if (get_user_pages
(current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
return 0;
err:
local_irq_enable();
return 1;
}
@ -301,15 +286,70 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
return 0;
return VTOP_SUCCESS;
inval:
return -1;
return VTOP_INVALID;
upm:
return -2;
return VTOP_RETRY;
}
/*
* Flush a CBE from cache. The CBE is clean in the cache. Dirty the
* CBE cacheline so that the line will be written back to home agent.
* Otherwise the line may be silently dropped. This has no impact
* except on performance.
*/
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
if (unlikely(cbe)) {
cbe->cbrexecstatus = 0; /* make CL dirty */
gru_flush_cache(cbe);
}
}
/*
* Preload the TLB with entries that may be required. Currently, preloading
* is implemented only for BCOPY. Preload <tlb_preload_count> pages or up to
* the end of the bcopy transfer, whichever is smaller.
*/
static void gru_preload_tlb(struct gru_state *gru,
struct gru_thread_state *gts, int atomic,
unsigned long fault_vaddr, int asid, int write,
unsigned char tlb_preload_count,
struct gru_tlb_fault_handle *tfh,
struct gru_control_block_extended *cbe)
{
unsigned long vaddr = 0, gpa;
int ret, pageshift;
if (cbe->opccpy != OP_BCOPY)
return;
if (fault_vaddr == cbe->cbe_baddr0)
vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
else if (fault_vaddr == cbe->cbe_baddr1)
vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
fault_vaddr &= PAGE_MASK;
vaddr &= PAGE_MASK;
vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
while (vaddr > fault_vaddr) {
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift)))
return;
gru_dbg(grudev,
"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
vaddr, asid, write, pageshift, gpa);
vaddr -= PAGE_SIZE;
STAT(tlb_preload_page);
}
}
/*
* Drop a TLB entry into the GRU. The fault is described by info in a TFH.
* Input:
@ -320,11 +360,14 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
* < 0 = error code
*
*/
static int gru_try_dropin(struct gru_thread_state *gts,
static int gru_try_dropin(struct gru_state *gru,
struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
unsigned long __user *cb)
struct gru_instruction_bits *cbk)
{
int pageshift = 0, asid, write, ret, atomic = !cb;
struct gru_control_block_extended *cbe = NULL;
unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
unsigned long gpa = 0, vaddr = 0;
/*
@ -334,6 +377,14 @@ static int gru_try_dropin(struct gru_thread_state *gts,
* the dropin is ignored. This eliminates the need for additional locks.
*/
/*
* Prefetch the CBE if doing TLB preloading
*/
if (unlikely(tlb_preload_count)) {
cbe = gru_tfh_to_cbe(tfh);
prefetchw(cbe);
}
/*
* Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
@ -341,18 +392,20 @@ static int gru_try_dropin(struct gru_thread_state *gts,
*/
if (tfh->status != TFHSTATUS_EXCEPTION) {
gru_flush_cache(tfh);
sync_core();
if (tfh->status != TFHSTATUS_EXCEPTION)
goto failnoexception;
STAT(tfh_stale_on_fault);
}
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
if (tfh->state == TFHSTATE_MISS_FMM && cb)
if (tfh->state == TFHSTATE_MISS_FMM && cbk)
goto failfmm;
write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
vaddr = tfh->missvaddr;
asid = tfh->missasid;
indexway = tfh->indexway;
if (asid == 0)
goto failnoasid;
@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts,
goto failactive;
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
if (ret == -1)
if (ret == VTOP_INVALID)
goto failinval;
if (ret == -2)
if (ret == VTOP_RETRY)
goto failupm;
if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
if (atomic || !gru_update_cch(gts, 0)) {
if (atomic || !gru_update_cch(gts)) {
gts->ts_force_cch_reload = 1;
goto failupm;
}
}
gru_cb_set_istatus_active(cb);
if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
gru_flush_cache_cbe(cbe);
}
gru_cb_set_istatus_active(cbk);
gts->ustats.tlbdropin++;
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift));
STAT(tlb_dropin);
gru_dbg(grudev,
"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
pageshift, gpa);
"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
" rw %d, ps %d, gpa 0x%lx\n",
atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
indexway, write, pageshift, gpa);
STAT(tlb_dropin);
return 0;
failnoasid:
/* No asid (delayed unload). */
STAT(tlb_dropin_fail_no_asid);
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
if (!cb)
if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
return -EAGAIN;
failupm:
/* Atomic failure switch CBR to UPM */
tfh_user_polling_mode(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_upm);
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return 1;
@ -408,6 +471,7 @@ static int gru_try_dropin(struct gru_thread_state *gts,
failfmm:
/* FMM state on UPM call */
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_fmm);
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@ -415,17 +479,20 @@ static int gru_try_dropin(struct gru_thread_state *gts,
failnoexception:
/* TFH status did not show exception pending */
gru_flush_cache(tfh);
if (cb)
gru_flush_cache(cb);
gru_flush_cache_cbe(cbe);
if (cbk)
gru_flush_cache(cbk);
STAT(tlb_dropin_fail_no_exception);
gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
tfh, tfh->status, tfh->state);
return 0;
failidle:
/* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
if (cb)
gru_flush_cache(cb);
gru_flush_cache_cbe(cbe);
if (cbk)
gru_flush_cache(cbk);
STAT(tlb_dropin_fail_idle);
gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@ -433,16 +500,18 @@ static int gru_try_dropin(struct gru_thread_state *gts,
failinval:
/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
tfh_exception(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_invalid);
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return -EFAULT;
failactive:
/* Range invalidate active. Switch to UPM iff atomic */
if (!cb)
if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_range_active);
gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
tfh, vaddr);
@ -455,31 +524,41 @@ static int gru_try_dropin(struct gru_thread_state *gts,
* Note that this is the interrupt handler that is registered with the
* Linux interrupt subsystem.
*/
irqreturn_t gru_intr(int irq, void *dev_id)
static irqreturn_t gru_intr(int chiplet, int blade)
{
struct gru_state *gru;
struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
struct completion *cmp;
int cbrnum, ctxnum;
STAT(intr);
gru = irq_to_gru(irq);
gru = &gru_base[blade]->bs_grus[chiplet];
if (!gru) {
dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
raw_smp_processor_id(), irq);
dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
raw_smp_processor_id(), chiplet);
return IRQ_NONE;
}
get_clear_fault_map(gru, &imap, &dmap);
gru_dbg(grudev,
"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
smp_processor_id(), chiplet, gru->gs_gid,
imap.fault_bits[0], imap.fault_bits[1],
dmap.fault_bits[0], dmap.fault_bits[1]);
for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
complete(gru->gs_blade->bs_async_wq);
STAT(intr_cbr);
cmp = gru->gs_blade->bs_async_wq;
if (cmp)
complete(cmp);
gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
}
for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
STAT(intr_tfh);
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id)
ctxnum = tfh->ctxnum;
gts = gru->gs_gts[ctxnum];
/* Spurious interrupts can cause this. Ignore. */
if (!gts) {
STAT(intr_spurious);
continue;
}
/*
* This is running in interrupt context. Trylock the mmap_sem.
* If it fails, retry the fault in user context.
*/
gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
down_read_trylock(&gts->ts_mm->mmap_sem)) {
gts->ustats.fmm_tlbdropin++;
gru_try_dropin(gts, tfh, NULL);
gru_try_dropin(gru, gts, tfh, NULL);
up_read(&gts->ts_mm->mmap_sem);
} else {
tfh_user_polling_mode(tfh);
@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
irqreturn_t gru0_intr(int irq, void *dev_id)
{
return gru_intr(0, uv_numa_blade_id());
}
irqreturn_t gru1_intr(int irq, void *dev_id)
{
return gru_intr(1, uv_numa_blade_id());
}
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
int blade;
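	/* Only blades without local CPUs are polled here; their chiplets'
	 * TLB interrupts cannot target a local CPU and are steered to
	 * this handler on another blade. */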
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
gru_intr(0, blade);
gru_intr(1, blade);
}
return IRQ_HANDLED;
}
static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
unsigned long __user *cb)
void *cb)
{
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
gts->ustats.upm_tlbdropin++;
gts->ustats.upm_tlbmiss++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
prefetchw(tfh); /* Helps on hdw, required for emulator */
ret = gru_try_dropin(gts, tfh, cb);
ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
if (ret <= 0)
return ret;
STAT(call_os_wait_queue);
@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb)
{
struct gru_tlb_fault_handle *tfh;
struct gru_thread_state *gts;
unsigned long __user *cbp;
void *cbk;
int ucbnum, cbrnum, ret = -EINVAL;
STAT(call_os);
gru_dbg(grudev, "address 0x%lx\n", cb);
/* sanity check the cb pointer */
ucbnum = get_cb_number((void *)cb);
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
cbp = (unsigned long *)cb;
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
/*
* If force_unload is set, the UPM TLB fault is phony. The task
* has migrated to another node and the GSEG must be moved. Just
* unload the context. The task will page fault and assign a new
* context.
*/
if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
gts->ts_blade != uv_numa_blade_id()) {
STAT(call_os_offnode_reference);
gts->ts_force_unload = 1;
}
gru_check_context_placement(gts);
/*
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
gts->ts_force_cch_reload = 0;
gru_update_cch(gts, 0);
gru_update_cch(gts);
}
ret = -EAGAIN;
cbrnum = thread_cbr_number(gts, ucbnum);
if (gts->ts_force_unload) {
gru_unload_context(gts, 1);
} else if (gts->ts_gru) {
if (gts->ts_gru) {
tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
ret = gru_user_dropin(gts, tfh, cbp);
cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
gts->ts_ctxnum, ucbnum);
ret = gru_user_dropin(gts, tfh, cbk);
}
exit:
gru_unlock_gts(gts);
@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg)
if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
return -EFAULT;
gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
gts = gru_find_lock_gts(excdet.cb);
if (!gts)
return -EINVAL;
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
ucbnum = get_cb_number((void *)excdet.cb);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
ret = -EINVAL;
@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg)
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
sync_core(); /* make sure we have current data */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg)
excdet.exceptdet1 = cbe->idef3upd;
excdet.cbrstate = cbe->cbrstate;
excdet.cbrexecstatus = cbe->cbrexecstatus;
gru_flush_cache(cbe);
gru_flush_cache_cbe(cbe);
ret = 0;
} else {
ret = -EAGAIN;
@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg)
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
/*
* The library creates arrays of contexts for threaded programs.
* If no gts exists in the array, the context has never been used & all
* statistics are implicitly 0.
*/
gts = gru_find_lock_gts(req.gseg);
if (gts) {
memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg)
return -EFAULT;
gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
gts = gru_alloc_locked_gts(req.gseg);
if (!gts)
return -EINVAL;
gts = gru_find_lock_gts(req.gseg);
if (!gts) {
gts = gru_alloc_locked_gts(req.gseg);
if (IS_ERR(gts))
return PTR_ERR(gts);
}
switch (req.op) {
case sco_blade_chiplet:
/* Select blade/chiplet for GRU context */
if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
    (req.val1 >= 0 && !gru_base[req.val1]) ||
    req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
ret = -EINVAL;
} else {
gts->ts_user_blade_id = req.val1;
gts->ts_user_chiplet_id = req.val0;
gru_check_context_placement(gts);
}
break;
case sco_gseg_owner:
/* Register the current task as the GSEG owner */
gts->ts_tgid_owner = current->tgid;

View file

@ -35,6 +35,9 @@
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86_64
#include <asm/uv/uv_irq.h>
#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
struct gru_vma_data *vdata;
int ret = -EINVAL;
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
@ -150,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_dsr_au_count =
GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
up_write(&current->mm->mmap_sem);
@ -190,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
{
int err = -EBADRQC;
gru_dbg(grudev, "file %p\n", file);
gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
switch (req) {
case GRU_CREATE_CONTEXT:
@ -232,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
* system.
*/
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
void *vaddr, int nid, int bid, int grunum)
void *vaddr, int blade_id, int chiplet_id)
{
spin_lock_init(&gru->gs_lock);
spin_lock_init(&gru->gs_asid_lock);
gru->gs_gru_base_paddr = paddr;
gru->gs_gru_base_vaddr = vaddr;
gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
gru->gs_blade = gru_base[bid];
gru->gs_blade_id = bid;
gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
gru->gs_blade = gru_base[blade_id];
gru->gs_blade_id = blade_id;
gru->gs_chiplet_id = chiplet_id;
gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
gru->gs_asid_limit = MAX_ASID;
gru_tgh_flush_init(gru);
if (gru->gs_gid >= gru_max_gids)
gru_max_gids = gru->gs_gid + 1;
gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
}
@ -264,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
max_user_cbrs = GRU_NUM_CB;
max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
for_each_online_node(nid) {
bid = uv_node_to_blade_id(nid);
pnode = uv_node_to_pnode(nid);
if (bid < 0 || gru_base[bid])
continue;
page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
for_each_possible_blade(bid) {
pnode = uv_blade_to_pnode(bid);
nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
page = alloc_pages_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
@ -285,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
chip++, gru++) {
paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
gru_init_chiplet(gru, paddr, vaddr, bid, chip);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
cbrs = max(cbrs, n);
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@ -298,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
return 0;
fail:
for (nid--; nid >= 0; nid--)
free_pages((unsigned long)gru_base[nid], order);
for (bid--; bid >= 0; bid--)
free_pages((unsigned long)gru_base[bid], order);
return -ENOMEM;
}
static void gru_free_tables(void)
{
int bid;
int order = get_order(sizeof(struct gru_state) *
GRU_CHIPLETS_PER_BLADE);
for (bid = 0; bid < GRU_MAX_BLADES; bid++)
free_pages((unsigned long)gru_base[bid], order);
}
static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
unsigned long mmr = 0;
int core;
/*
* We target the cores of a blade and not the hyperthreads themselves.
* There is a max of 8 cores per socket and 2 sockets per blade,
* making for a max total of 16 cores (i.e., 16 CPUs without
* hyperthreading and 32 CPUs with hyperthreading).
*/
core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
return 0;
if (chiplet == 0) {
mmr = UVH_GR0_TLB_INT0_CONFIG +
core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
} else if (chiplet == 1) {
mmr = UVH_GR1_TLB_INT0_CONFIG +
core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
} else {
BUG();
}
*corep = core;
return mmr;
}
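The per-core MMR lookup above relies on the interrupt-config registers for consecutive cores sitting a fixed stride apart, so core N's register is reg0 + N * (reg1 - reg0). A standalone sketch of that indexing, with hypothetical offsets standing in for the real UVH_GR*_TLB_INT*_CONFIG constants:

#include <stdio.h>

/* Hypothetical register offsets; the real UVH_* values differ. */
#define INT0_CONFIG 0x61b00UL	/* core 0's interrupt-config register */
#define INT1_CONFIG 0x61b40UL	/* core 1's interrupt-config register */

static unsigned long core_mmr(int core)
{
	/* registers for consecutive cores sit a fixed stride apart */
	return INT0_CONFIG + core * (INT1_CONFIG - INT0_CONFIG);
}

int main(void)
{
	int core;

	for (core = 0; core < 4; core++)
		printf("core %d -> mmr 0x%lx\n", core, core_mmr(core));
	return 0;
}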
#ifdef CONFIG_IA64
static int get_base_irq(void)
static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
static void gru_noop(unsigned int irq)
{
return IRQ_GRU;
}
static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
.mask = gru_noop,
.unmask = gru_noop,
.ack = gru_noop
}
};
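The gru_chip table above uses GCC's designated range initializer (the form without '=' is an older GNU extension) to give every array slot the same no-op callbacks. A minimal standalone illustration of the range syntax:

#include <stdio.h>

struct ops {
	const char *name;
};

/* [first ... last] initializes every element in the range at once */
static struct ops table[4] = {
	[0 ... 3] = { .name = "noop" }
};

int main(void)
{
	printf("%s %s\n", table[0].name, table[3].name);	/* noop noop */
	return 0;
}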
static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
irq_handler_t irq_handler, int cpu, int blade)
{
unsigned long mmr;
int irq = IRQ_GRU + chiplet;
int ret, core;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return 0;
if (gru_irq_count[chiplet] == 0) {
gru_chip[chiplet].name = irq_name;
ret = set_irq_chip(irq, &gru_chip[chiplet]);
if (ret) {
printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
if (ret) {
printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
}
gru_irq_count[chiplet]++;
return 0;
}
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
unsigned long mmr;
int core, irq = IRQ_GRU + chiplet;
if (gru_irq_count[chiplet] == 0)
return;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return;
if (--gru_irq_count[chiplet] == 0)
free_irq(irq, NULL);
}
#elif defined CONFIG_X86_64
static void noop(unsigned int irq)
static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
irq_handler_t irq_handler, int cpu, int blade)
{
unsigned long mmr;
int irq, core;
int ret;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return 0;
irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
if (irq < 0) {
printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -irq);
return irq;
}
ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
if (ret) {
uv_teardown_irq(irq);
printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
return 0;
}
static struct irq_chip gru_chip = {
.name = "gru",
.mask = noop,
.unmask = noop,
.ack = noop,
};
static int get_base_irq(void)
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
set_irq_chip(IRQ_GRU, &gru_chip);
set_irq_chip(IRQ_GRU + 1, &gru_chip);
return IRQ_GRU;
int irq, core;
unsigned long mmr;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr) {
irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
if (irq) {
free_irq(irq, NULL);
uv_teardown_irq(irq);
}
}
}
#endif
static void gru_teardown_tlb_irqs(void)
{
int blade;
int cpu;
for_each_online_cpu(cpu) {
blade = uv_cpu_to_blade_id(cpu);
gru_chiplet_teardown_tlb_irq(0, cpu, blade);
gru_chiplet_teardown_tlb_irq(1, cpu, blade);
}
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
gru_chiplet_teardown_tlb_irq(0, 0, blade);
gru_chiplet_teardown_tlb_irq(1, 0, blade);
}
}
static int gru_setup_tlb_irqs(void)
{
int blade;
int cpu;
int ret;
for_each_online_cpu(cpu) {
blade = uv_cpu_to_blade_id(cpu);
ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
if (ret != 0)
goto exit1;
ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
if (ret != 0)
goto exit1;
}
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
if (ret != 0)
goto exit1;
ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
if (ret != 0)
goto exit1;
}
return 0;
exit1:
gru_teardown_tlb_irqs();
return ret;
}
/*
* gru_init
*
@ -338,8 +516,7 @@ static int get_base_irq(void)
*/
static int __init gru_init(void)
{
int ret, irq, chip;
char id[10];
int ret;
if (!is_uv_system())
return 0;
@ -354,41 +531,29 @@ static int __init gru_init(void)
gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
gru_start_paddr, gru_end_paddr);
irq = get_base_irq();
for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
/* TODO: fix irq handling on x86. For now ignore failure because
* interrupts are not required & not yet fully supported */
if (ret) {
printk(KERN_WARNING
"!!!WARNING: GRU ignoring request failure!!!\n");
ret = 0;
}
if (ret) {
printk(KERN_ERR "%s: request_irq failed\n",
GRU_DRIVER_ID_STR);
goto exit1;
}
}
ret = misc_register(&gru_miscdev);
if (ret) {
printk(KERN_ERR "%s: misc_register failed\n",
GRU_DRIVER_ID_STR);
goto exit1;
goto exit0;
}
ret = gru_proc_init();
if (ret) {
printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
goto exit2;
goto exit1;
}
ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
if (ret) {
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
goto exit3;
goto exit2;
}
ret = gru_setup_tlb_irqs();
if (ret != 0)
goto exit3;
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@ -396,31 +561,24 @@ static int __init gru_init(void)
return 0;
exit3:
gru_proc_exit();
gru_free_tables();
exit2:
misc_deregister(&gru_miscdev);
gru_proc_exit();
exit1:
for (--chip; chip >= 0; chip--)
free_irq(irq + chip, NULL);
misc_deregister(&gru_miscdev);
exit0:
return ret;
}
static void __exit gru_exit(void)
{
int i, bid;
int order = get_order(sizeof(struct gru_state) *
GRU_CHIPLETS_PER_BLADE);
if (!is_uv_system())
return;
for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
free_irq(IRQ_GRU + i, NULL);
gru_teardown_tlb_irqs();
gru_kservices_exit();
for (bid = 0; bid < GRU_MAX_BLADES; bid++)
free_pages((unsigned long)gru_base[bid], order);
gru_free_tables();
misc_deregister(&gru_miscdev);
gru_proc_exit();
}


@ -27,9 +27,11 @@
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c) ((c) * 1000000000 / local_cpu_data->itc_freq)
#else
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
#endif
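CLKS2NSEC converts cycle counts to nanoseconds: tsc_khz is cycles per millisecond, so cycles * 10^6 / tsc_khz yields nanoseconds. A worked sketch assuming a hypothetical 2.4 GHz TSC (tsc_khz = 2400000):

#include <stdio.h>

static unsigned long tsc_khz = 2400000;	/* stand-in for the kernel's tsc_khz */

static unsigned long clks2nsec(unsigned long c)
{
	/* khz = cycles per millisecond, so c * 1e6 / khz = nanoseconds */
	return c * 1000000UL / tsc_khz;
}

int main(void)
{
	/* 4800 cycles at 2.4 GHz is 2 microseconds: prints 2000 */
	printf("%lu cycles = %lu ns\n", 4800UL, clks2nsec(4800UL));
	return 0;
}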
/* Extract the status field from a kernel handle */
@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
unsigned long nsec;
nsec = CLKS2NSEC(clks);
atomic_long_inc(&mcs_op_statistics[op].count);
atomic_long_add(clks, &mcs_op_statistics[op].total);
if (mcs_op_statistics[op].max < clks)
mcs_op_statistics[op].max = clks;
atomic_long_add(nsec, &mcs_op_statistics[op].total);
if (mcs_op_statistics[op].max < nsec)
mcs_op_statistics[op].max = nsec;
}
static void start_instruction(void *h)
{
unsigned long *w0 = h;
wmb(); /* setting CMD bit must be last */
*w0 = *w0 | 1;
wmb(); /* setting CMD/STATUS bits must be last */
*w0 = *w0 | 0x20001;
gru_flush_cache(h);
}
static void report_instruction_timeout(void *h)
{
unsigned long goff = GSEGPOFF((unsigned long)h);
char *id = "???";
if (TYPE_IS(CCH, goff))
id = "CCH";
else if (TYPE_IS(TGH, goff))
id = "TGH";
else if (TYPE_IS(TFH, goff))
id = "TFH";
panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
status = GET_MSEG_HANDLE_STATUS(h);
if (status != CCHSTATUS_ACTIVE)
break;
if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
panic("GRU %p is malfunctioning: start %ld, end %ld\n",
h, start_time, (unsigned long)get_cycles());
if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
report_instruction_timeout(h);
start_time = get_cycles();
}
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
int cch_allocate(struct gru_context_configuration_handle *cch)
{
int ret;
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_allocate);
ret = wait_instruction_complete(cch, cchop_allocate);
/*
* Stop speculation into the GSEG being mapped by the previous ALLOCATE.
* The GSEG memory does not exist until the ALLOCATE completes.
*/
sync_core();
return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch)
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
int ret;
cch->opc = CCHOP_DEALLOCATE;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_deallocate);
ret = wait_instruction_complete(cch, cchop_deallocate);
/*
* Stop speculation into the GSEG being unmapped by the previous
* DEALLOCATE.
*/
sync_core();
return ret;
}
int cch_interrupt_sync(struct gru_context_configuration_handle
@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
return wait_instruction_complete(tgh, tghop_invalidate);
}
void tfh_write_only(struct gru_tlb_fault_handle *tfh,
unsigned long pfn, unsigned long vaddr,
int asid, int dirty, int pagesize)
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
unsigned long paddr, int gaa,
unsigned long vaddr, int asid, int dirty,
int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
tfh->pfn = pfn;
tfh->pfn = paddr >> GRU_PADDR_SHIFT;
tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_ONLY;
start_instruction(tfh);
return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,


@ -91,6 +91,12 @@
/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
/* Test a valid handle address to determine the type */
#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
(((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
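TYPE_IS classifies an arbitrary handle address by testing both that it falls inside the region reserved for that handle type and that it is aligned to the handle stride. A standalone sketch of the test with hypothetical layout constants (the real GRU_*_BASE and count values differ):

#include <stdio.h>

#define GRU_HANDLE_STRIDE	256
#define GRU_CCH_BASE		0x20000UL	/* hypothetical */
#define GRU_NUM_CCH		16

#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
		GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
		(((h) & (GRU_HANDLE_STRIDE - 1)) == 0))

int main(void)
{
	printf("%d\n", TYPE_IS(CCH, 0x20100UL));	/* 1: in range, stride aligned */
	printf("%d\n", TYPE_IS(CCH, 0x20101UL));	/* 0: misaligned */
	printf("%d\n", TYPE_IS(CCH, 0x30000UL));	/* 0: outside the CCH region */
	return 0;
}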
/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
static inline struct gru_control_block_extended *gru_tfh_to_cbe(
struct gru_tlb_fault_handle *tfh)
{
unsigned long cbe;
cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
return (struct gru_control_block_extended*)cbe;
}
/*
@ -236,6 +252,17 @@ enum gru_tgh_state {
TGHSTATE_RESTART_CTX,
};
enum gru_tgh_cause {
TGHCAUSE_RR_ECC,
TGHCAUSE_TLB_ECC,
TGHCAUSE_LRU_ECC,
TGHCAUSE_PS_ECC,
TGHCAUSE_MUL_ERR,
TGHCAUSE_DATA_ERR,
TGHCAUSE_SW_FORCE
};
/*
* TFH - TLB Global Handle
* Used for TLB dropins into the GRU TLB.
@ -440,6 +467,12 @@ struct gru_control_block_extended {
unsigned int cbrexecstatus:8;
};
/* CBE fields for active BCOPY instructions */
#define cbe_baddr0 idef1upd
#define cbe_baddr1 idef3upd
#define cbe_src_cl idef6cpy
#define cbe_nelemcur idef5upd
enum gru_cbr_state {
CBRSTATE_INACTIVE,
CBRSTATE_IDLE,
@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
unsigned long vaddrmask, int asid, int pagesize, int global, int n,
unsigned short ctxbitmap);
void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
unsigned long vaddr, int asid, int dirty, int pagesize);
int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_restart(struct gru_tlb_fault_handle *tfh);


@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s)
static int gru_dump_context_data(void *grubase,
struct gru_context_configuration_handle *cch,
void __user *ubuf, int ctxnum, int dsrcnt)
void __user *ubuf, int ctxnum, int dsrcnt,
int flush_cbrs)
{
void *cb, *cbe, *tfh, *gseg;
int i, scr;
@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase,
tfh = grubase + GRU_TFH_BASE;
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
if (flush_cbrs)
gru_flush_cache(cb);
if (gru_user_copy_handle(&ubuf, cb))
goto fail;
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
@ -115,7 +118,7 @@ static int gru_dump_tgh(struct gru_state *gru,
static int gru_dump_context(struct gru_state *gru, int ctxnum,
void __user *ubuf, void __user *ubufend, char data_opt,
char lock_cch)
char lock_cch, char flush_cbrs)
{
struct gru_dump_context_header hdr;
struct gru_dump_context_header __user *uhdr = ubuf;
@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFBIG;
else
ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
dsrcnt);
dsrcnt, flush_cbrs);
}
if (cch_locked)
unlock_cch_handle(cch);
@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg)
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
req.data_opt, req.lock_cch);
req.data_opt, req.lock_cch,
req.flush_cbrs);
if (ret < 0)
goto fail;
ubuf += ret;


@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
@ -97,9 +98,6 @@
#define ASYNC_HAN_TO_BID(h) ((h) - 1)
#define ASYNC_BID_TO_HAN(b) ((b) + 1)
#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
(GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
up_read(&bs->bs_kgts_sema);
down_write(&bs->bs_kgts_sema);
if (!bs->bs_kgts)
bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
if (!bs->bs_kgts) {
bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
bs->bs_kgts->ts_user_blade_id = blade_id;
}
kgts = bs->bs_kgts;
if (!kgts->ts_gru) {
@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
GRU_NUM_KERNEL_DSR_BYTES * ncpus +
bs->bs_async_dsr_bytes);
while (!gru_assign_gru_context(kgts, blade_id)) {
while (!gru_assign_gru_context(kgts)) {
msleep(1);
gru_steal_context(kgts, blade_id);
gru_steal_context(kgts);
}
gru_load_context(kgts);
gru = bs->bs_kgts->ts_gru;
@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void)
bs = gru_base[bid];
if (!bs)
continue;
/* Ignore busy contexts. Don't want to block here. */
if (down_write_trylock(&bs->bs_kgts_sema)) {
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
kfree(kgts);
bs->bs_kgts = NULL;
up_write(&bs->bs_kgts_sema);
kfree(kgts);
} else {
ret++;
}
@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void)
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
int bid;
STAT(lock_kernel_context);
bs = gru_base[blade_id];
again:
bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
bs = gru_base[bid];
/* Handle the case where migration occurred while waiting for the sema */
down_read(&bs->bs_kgts_sema);
if (blade_id < 0 && bid != uv_numa_blade_id()) {
up_read(&bs->bs_kgts_sema);
goto again;
}
if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
gru_load_kernel_context(bs, blade_id);
gru_load_kernel_context(bs, bid);
return bs;
}
@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
bs = gru_lock_kernel_context(uv_numa_blade_id());
bs = gru_lock_kernel_context(-1);
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
struct gru_blade_state *bs;
int cbrnum;
struct gru_thread_state *kgts = NULL;
unsigned long off;
int cbrnum, bid;
bs = KCB_TO_BS(cb);
cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
/*
* Locate kgts for cb. This algorithm is SLOW but
* this function is rarely called (i.e., almost never).
* Performance does not matter.
*/
for_each_possible_blade(bid) {
if (!gru_base[bid])
break;
kgts = gru_base[bid]->bs_kgts;
if (!kgts || !kgts->ts_gru)
continue;
off = cb - kgts->ts_gru->gs_gru_base_vaddr;
if (off < GRU_SIZE)
break;
kgts = NULL;
}
BUG_ON(!kgts);
cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
cbe = get_cbe(GRUBASE(cb), cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
sync_core();
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
gru_get_cb_exception_detail(cb, &excdet);
snprintf(buf, size,
"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
"excdet0 0x%lx, excdet1 0x%x",
"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
gen, excdet.opc, excdet.exopc, excdet.ecause,
excdet.exceptdet0, excdet.exceptdet1);
} else {
@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb)
int ret;
ret = gen->istatus;
if (ret != CBS_EXCEPTION)
return ret;
return gru_retry_exception(cb);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
rmb();
return ret;
}
@ -471,7 +500,7 @@ int gru_wait_proc(void *cb)
ret = gru_wait_idle_or_exception(gen);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
rmb();
return ret;
}
@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
mqd->mq = mq;
mqd->mq_gpa = uv_gpa(mq);
mqd->qlines = qlines;
mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
mqd->interrupt_pnode = nasid >> 1;
mqd->interrupt_vector = vector;
mqd->interrupt_apicid = apicid;
return 0;
@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
/* fallthru */
default:
BUG();
}
@ -672,18 +703,6 @@ static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
return MQE_UNEXPECTED_CB_ERR;
}
/*
* Send a cross-partition interrupt to the SSI that contains the target
* message queue. Normally, the interrupt is automatically delivered by hardware
* but some error conditions require explicit delivery.
*/
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
if (mqd->interrupt_vector)
uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
mqd->interrupt_vector);
}
/*
* Handle a PUT failure. Note: if message was a 2-line message, one of the
* lines might have successfully have been written. Before sending the
@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
unsigned long m;
unsigned long m, *val = mesg, gpa, save;
int ret;
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
send_message_queue_interrupt(mqd);
if (!mqd->interrupt_vector)
return MQE_OK;
/*
* Send a cross-partition interrupt to the SSI that contains the target
* message queue. Normally, the interrupt is automatically delivered by
* hardware but some error conditions require explicit delivery.
* Use the GRU to deliver the interrupt. Otherwise partition failures
* could cause unrecoverable errors.
*/
gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
save = *val;
*val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
dest_Fixed);
gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
ret = gru_wait(cb);
*val = save;
if (ret != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
return MQE_OK;
}
@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
STAT(mesq_send_put_nacked);
ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
/* fallthru */
default:
BUG();
}
@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
int present = mhdr->present;
/* skip NOOP messages */
STAT(mesq_receive);
while (present == MQS_NOOP) {
gru_free_message(mqd, mhdr);
mhdr = mq->next;
@ -851,12 +892,36 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
if (mhdr->lines == 2)
restore_present2(mhdr, mhdr->present2);
STAT(mesq_receive);
return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
/*
* Load a DW from a global GPA. The GPA can be a memory or MMR address.
*/
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
void *cb;
void *dsr;
int ret, iaa;
STAT(read_gpa);
if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
return MQE_BUG_NO_RESOURCES;
iaa = gpa >> 62;
gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
ret = gru_wait(cb);
if (ret == CBS_IDLE)
*value = *(unsigned long *)dsr;
gru_free_cpu_resources(cb, dsr);
return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);
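The "gpa >> 62" above extracts the IAA field: the top two bits of a UV global address select the target address space (cf. IAA_REGISTER used for MMR stores earlier in this file). A standalone sketch with a hypothetical register-space gpa:

#include <stdio.h>

int main(void)
{
	/* hypothetical gpa with the top two bits set (0b11) */
	unsigned long gpa = 0xc000000000061b00UL;
	int iaa = gpa >> 62;	/* 0..3 address-space selector */

	printf("iaa = %d\n", iaa);	/* prints 3 */
	return 0;
}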
/*
* Copy a block of data using the GRU resources
*/
@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg)
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
goto done;
}
if (*p != MAGIC) {
printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
goto done;
}
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
goto done;
}
if (word0 != word1 || word1 != MAGIC) {
printk(KERN_DEBUG
"GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
word1, MAGIC);
"GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
smp_processor_id(), word1, MAGIC);
goto done;
}
ret = 0;
@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg)
if (ret)
break;
}
if (ret != MQE_QUEUE_FULL || i != 4)
if (ret != MQE_QUEUE_FULL || i != 4) {
printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
smp_processor_id(), ret, i);
goto done;
}
for (i = 0; i < 6; i++) {
m = gru_get_next_message(&mqd);
@ -961,7 +1029,12 @@ static int quicktest1(unsigned long arg)
break;
gru_free_message(&mqd, m);
}
ret = (i == 4) ? 0 : -EIO;
if (i != 4) {
printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
smp_processor_id(), i, m, m ? m[8] : -1);
goto done;
}
ret = 0;
done:
kfree(p);
@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg)
int ret = 0;
unsigned long *buf;
void *cb0, *cb;
struct gru_control_block_status *gen;
int i, k, istatus, bytes;
bytes = numcb * 4 * 8;
@ -996,20 +1070,30 @@ static int quicktest2(unsigned long arg)
XTYPE_DW, 4, 1, IMA_INTERRUPT);
ret = 0;
for (k = 0; k < numcb; k++) {
k = numcb;
do {
gru_wait_async_cbr(han);
for (i = 0; i < numcb; i++) {
cb = cb0 + i * GRU_HANDLE_STRIDE;
istatus = gru_check_status(cb);
if (istatus == CBS_ACTIVE)
continue;
if (istatus == CBS_EXCEPTION)
ret = -EFAULT;
else if (buf[i] || buf[i + 1] || buf[i + 2] ||
buf[i + 3])
ret = -EIO;
if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
break;
}
}
if (i == numcb)
continue;
if (istatus != CBS_IDLE) {
printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
ret = -EFAULT;
} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
buf[4 * i + 3]) {
printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
ret = -EIO;
}
k--;
gen = cb;
gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
} while (k);
BUG_ON(cmp.done);
gru_unlock_async_resource(han);
@ -1019,6 +1103,22 @@ static int quicktest2(unsigned long arg)
return ret;
}
#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
char buf1[BUFSIZE], buf2[BUFSIZE];
int ret = 0;
memset(buf2, 0, sizeof(buf2));
memset(buf1, get_cycles() & 255, sizeof(buf1));
gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
if (memcmp(buf1, buf2, BUFSIZE)) {
printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
ret = -EIO;
}
return ret;
}
/*
* Debugging only. User hook for various kernel tests
* of driver & gru.
@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg)
case 2:
ret = quicktest2(arg);
break;
case 3:
ret = quicktest3(arg);
break;
case 99:
ret = gru_free_kernel_contexts();
break;


@ -130,6 +130,20 @@ extern void gru_free_message(struct gru_message_queue_desc *mqd,
extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
/*
* Read a GRU global GPA. Source can be located in a remote partition.
*
* Input:
* value memory address where MMR value is returned
* gpa source numalink physical address of GPA
*
* Output:
* 0 OK
* >0 error
*/
int gru_read_gpa(unsigned long *value, unsigned long gpa);
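A minimal kernel-side usage sketch for the interface documented above; read_remote_dw and its gpa argument are hypothetical, and in practice the address would come from uv_gpa() or an MMR-address helper:

/* assumes kernel context: <linux/errno.h>, <linux/printk.h> */
static int read_remote_dw(unsigned long gpa)
{
	unsigned long value;
	int ret;

	ret = gru_read_gpa(&value, gpa);	/* 0 on success, >0 on error */
	if (ret)
		return -EIO;
	pr_debug("gpa 0x%lx -> 0x%lx\n", gpa, value);
	return 0;
}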
/*
* Copy data using the GRU. Source or destination can be located in a remote
* partition.


@ -63,18 +63,9 @@
#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
/*
* Statistics kept on a per-GTS basis.
*/
struct gts_statistics {
unsigned long fmm_tlbdropin;
unsigned long upm_tlbdropin;
unsigned long context_stolen;
};
struct gru_get_gseg_statistics_req {
unsigned long gseg;
struct gts_statistics stats;
unsigned long gseg;
struct gru_gseg_statistics stats;
};
/*
@ -86,6 +77,7 @@ struct gru_create_context_req {
unsigned int control_blocks;
unsigned int maximum_thread_count;
unsigned int options;
unsigned char tlb_preload_count;
};
/*
@ -98,11 +90,12 @@ struct gru_unload_context_req {
/*
* Structure used to set context options
*/
enum {sco_gseg_owner, sco_cch_req_slice};
enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
struct gru_set_context_option_req {
unsigned long gseg;
int op;
unsigned long val1;
int val0;
long val1;
};
/*
@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req {
int ctxnum;
char data_opt;
char lock_cch;
char flush_cbrs;
char fill[10];
pid_t pid;
void *buf;
size_t buflen;


@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
@ -48,12 +49,20 @@ struct device *grudev = &gru_device;
/*
* Select a gru fault map to be used by the current cpu. Note that
* multiple cpus may be using the same map.
* ZZZ should "shift" be used?? Depends on HT cpu numbering
* ZZZ should be inline but did not work on emulator
*/
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
return uv_blade_processor_id() % GRU_NUM_TFM;
#else
int cpu = smp_processor_id();
int id, core;
core = uv_cpu_core_number(cpu);
id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
return id;
#endif
}
/*--------- ASID Management -------------------------------------------
@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
void gts_drop(struct gru_thread_state *gts)
{
if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
gru_drop_mmu_notifier(gts->ts_gms);
if (gts->ts_gms)
gru_drop_mmu_notifier(gts->ts_gms);
kfree(gts);
STAT(gts_free);
}
@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
* Allocate a thread state structure.
*/
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
int cbr_au_count, int dsr_au_count, int options, int tsid)
int cbr_au_count, int dsr_au_count,
unsigned char tlb_preload_count, int options, int tsid)
{
struct gru_thread_state *gts;
struct gru_mm_struct *gms;
int bytes;
bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
return NULL;
return ERR_PTR(-ENOMEM);
STAT(gts_alloc);
memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
mutex_init(&gts->ts_ctxlock);
gts->ts_cbr_au_count = cbr_au_count;
gts->ts_dsr_au_count = dsr_au_count;
gts->ts_tlb_preload_count = tlb_preload_count;
gts->ts_user_options = options;
gts->ts_user_blade_id = -1;
gts->ts_user_chiplet_id = -1;
gts->ts_tsid = tsid;
gts->ts_ctxnum = NULLCTX;
gts->ts_tlb_int_select = -1;
@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
if (vma) {
gts->ts_mm = current->mm;
gts->ts_vma = vma;
gts->ts_gms = gru_register_mmu_notifier();
if (!gts->ts_gms)
gms = gru_register_mmu_notifier();
if (IS_ERR(gms))
goto err;
gts->ts_gms = gms;
}
gru_dbg(grudev, "alloc gts %p\n", gts);
@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
err:
gts_drop(gts);
return NULL;
return ERR_CAST(gms);
}
/*
@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
if (!vdata)
return NULL;
STAT(vdata_alloc);
INIT_LIST_HEAD(&vdata->vd_head);
spin_lock_init(&vdata->vd_lock);
gru_dbg(grudev, "alloc vdata %p\n", vdata);
@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
vdata->vd_dsr_au_count,
vdata->vd_tlb_preload_count,
vdata->vd_user_options, tsid);
if (!gts)
return NULL;
if (IS_ERR(gts))
return gts;
spin_lock(&vdata->vd_lock);
ngts = gru_find_current_gts_nolock(vdata, tsid);
@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
memset(cbe + i * GRU_HANDLE_STRIDE, 0,
GRU_CACHE_LINE_BYTES);
}
/* Flush CBE to hide race in context restart */
mb();
gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
/* CBEs may not be coherent. Flush them from cache */
for_each_cbr_in_allocation_map(i, &cbrmap, scr)
gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
mb(); /* Let the CL flush complete */
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
gru_dbg(grudev, "gts %p\n", gts);
gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
gts, gts->ts_cbr_map, gts->ts_dsr_map);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
if (cch_deallocate(cch))
BUG();
gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
unlock_cch_handle(cch);
gru_free_gru_context(gts);
@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts)
struct gru_context_configuration_handle *cch;
int i, err, asid, ctxnum = gts->ts_ctxnum;
gru_dbg(grudev, "gts %p\n", gts);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
lock_cch_handle(cch);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts)
cch->unmap_enable = 1;
cch->tfm_done_bit_enable = 1;
cch->cb_int_enable = 1;
cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
} else {
cch->unmap_enable = 0;
cch->tfm_done_bit_enable = 0;
@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts)
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
* - update sizeavail mask
* - force a delayed context unload by clearing the CCH asids. This
* forces TLB misses for new GRU instructions. The context is unloaded
* when the next TLB miss occurs.
*/
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
int gru_update_cch(struct gru_thread_state *gts)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
goto exit;
if (cch_interrupt(cch))
BUG();
if (!force_unload) {
for (i = 0; i < 8; i++)
cch->sizeavail[i] = gts->ts_sizeavail;
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gru_cpu_fault_map_id();
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
} else {
for (i = 0; i < 8; i++)
cch->asid[i] = 0;
cch->tfm_fault_bit_enable = 0;
cch->tlb_int_enable = 0;
gts->ts_force_unload = 1;
}
for (i = 0; i < 8; i++)
cch->sizeavail[i] = gts->ts_sizeavail;
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gru_cpu_fault_map_id();
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch_start(cch))
BUG();
ret = 1;
@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
return gru_update_cch(gts, 0);
return gru_update_cch(gts);
}
/*
* Check if a GRU context is allowed to use a specific chiplet. By default
* a context is assigned to any blade-local chiplet. However, users can
* override this.
* Returns 1 if assignment allowed, 0 otherwise
*/
static int gru_check_chiplet_assignment(struct gru_state *gru,
struct gru_thread_state *gts)
{
int blade_id;
int chiplet_id;
blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
chiplet_id = gts->ts_user_chiplet_id;
return gru->gs_blade_id == blade_id &&
(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}
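The predicate above reduces to: the GRU must sit on the requested blade (defaulting to the caller's current blade when none was pinned), and the chiplet must match unless the user left it as "any" (< 0). A standalone sketch of that logic:

#include <stdio.h>

static int chiplet_ok(int gru_blade, int gru_chiplet,
		      int want_blade, int want_chiplet, int cur_blade)
{
	/* negative ids mean "no user preference" */
	int blade = want_blade < 0 ? cur_blade : want_blade;

	return gru_blade == blade &&
	       (want_chiplet < 0 || want_chiplet == gru_chiplet);
}

int main(void)
{
	printf("%d\n", chiplet_ok(3, 1, -1, -1, 3));	/* 1: any chiplet, local blade */
	printf("%d\n", chiplet_ok(3, 1, 3, 0, 3));	/* 0: user pinned chiplet 0 */
	return 0;
}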
/*
* Unload the gru context if it is not assigned to the correct blade or
* chiplet. Misassignment can occur if the process migrates to a different
* blade or if the user changes the selected blade/chiplet.
*/
void gru_check_context_placement(struct gru_thread_state *gts)
{
struct gru_state *gru;
/*
* If the current task is the context owner, verify that the
* context is correctly placed. This test is skipped for non-owner
* references. Pthread apps use non-owner references to the CBRs.
*/
gru = gts->ts_gru;
if (!gru || gts->ts_tgid_owner != current->tgid)
return;
if (!gru_check_chiplet_assignment(gru, gts)) {
STAT(check_context_unload);
gru_unload_context(gts, 1);
} else if (gru_retarget_intr(gts)) {
STAT(check_context_retarget_intr);
}
}
@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts,
}
}
void gru_steal_context(struct gru_thread_state *gts, int blade_id)
void gru_steal_context(struct gru_thread_state *gts)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
struct gru_thread_state *ngts = NULL;
int ctxnum, ctxnum0, flag = 0, cbr, dsr;
int blade_id;
blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
gru = blade->bs_lru_gru;
if (ctxnum == 0)
gru = next_gru(blade, gru);
blade->bs_lru_gru = gru;
blade->bs_lru_ctxnum = ctxnum;
ctxnum0 = ctxnum;
gru0 = gru;
while (1) {
if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
spin_lock(&gru->gs_lock);
for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (flag && gru == gru0 && ctxnum == ctxnum0)
if (gru_check_chiplet_assignment(gru, gts)) {
if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
ngts = gru->gs_gts[ctxnum];
/*
* We are grabbing locks out of order, so trylock is
* needed. GTSs are usually not locked, so the odds of
* success are high. If trylock fails, try to steal a
* different GSEG.
*/
if (ngts && is_gts_stealable(ngts, blade))
spin_lock(&gru->gs_lock);
for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (flag && gru == gru0 && ctxnum == ctxnum0)
break;
ngts = gru->gs_gts[ctxnum];
/*
* We are grabbing locks out of order, so trylock is
* needed. GTSs are usually not locked, so the odds of
* success are high. If trylock fails, try to steal a
* different GSEG.
*/
if (ngts && is_gts_stealable(ngts, blade))
break;
ngts = NULL;
}
spin_unlock(&gru->gs_lock);
if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
break;
ngts = NULL;
flag = 1;
}
spin_unlock(&gru->gs_lock);
if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
if (flag && gru == gru0)
break;
flag = 1;
ctxnum = 0;
gru = next_gru(blade, gru);
}
blade->bs_lru_gru = gru;
blade->bs_lru_ctxnum = ctxnum;
spin_unlock(&blade->bs_lock);
if (ngts) {
@ -775,20 +850,35 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
hweight64(gru->gs_dsr_map));
}
/*
* Assign a gru context.
*/
static int gru_assign_context_number(struct gru_state *gru)
{
int ctxnum;
ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
__set_bit(ctxnum, &gru->gs_context_map);
return ctxnum;
}
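gru_assign_context_number() is a small bitmap slot allocator: find the first clear bit, set it, and hand the index out as the context number (the caller holds gs_lock and has already verified a free CCH exists). A user-space sketch of the same find-first-zero-then-set pattern:

#include <stdio.h>

#define NUM_CCH 16

static unsigned long map;	/* one bit per context, 0 = free */

static int alloc_slot(void)
{
	int i;

	for (i = 0; i < NUM_CCH; i++) {
		if (!(map & (1UL << i))) {
			map |= 1UL << i;	/* claim the first free slot */
			return i;
		}
	}
	return -1;	/* all busy; the driver steals a context instead */
}

int main(void)
{
	int a = alloc_slot();
	int b = alloc_slot();

	printf("%d %d\n", a, b);	/* 0 1 */
	return 0;
}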
/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
int blade)
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
int blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
for_each_gru_on_blade(grux, blade, i) {
for_each_gru_on_blade(grux, blade_id, i) {
if (!gru_check_chiplet_assignment(grux, gts))
continue;
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
@ -809,12 +899,9 @@ struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
gts->ts_blade = gru->gs_blade_id;
gts->ts_ctxnum =
find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
gts->ts_ctxnum = gru_assign_context_number(gru);
atomic_inc(&gts->ts_refcnt);
gru->gs_gts[gts->ts_ctxnum] = gts;
__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
spin_unlock(&gru->gs_lock);
STAT(assign_context);
@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
int blade_id;
vaddr = (unsigned long)vmf->virtual_address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
blade_id = uv_numa_blade_id();
if (gts->ts_gru) {
if (gts->ts_gru->gs_blade_id != blade_id) {
STAT(migrated_nopfn_unload);
gru_unload_context(gts, 1);
} else {
if (gru_retarget_intr(gts))
STAT(migrated_nopfn_retarget);
}
}
gru_check_context_placement(gts);
if (!gts->ts_gru) {
STAT(load_user_context);
if (!gru_assign_gru_context(gts, blade_id)) {
if (!gru_assign_gru_context(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
blade_id = uv_numa_blade_id();
if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
gru_steal_context(gts, blade_id);
gru_steal_context(gts);
goto again;
}
gru_load_context(gts);


@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
{
unsigned long val = atomic_long_read(v);
if (val)
seq_printf(s, "%16lu %s\n", val, id);
seq_printf(s, "%16lu %s\n", val, id);
}
static int statistics_show(struct seq_file *s, void *p)
@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
printstat(s, vdata_double_alloc);
printstat(s, gms_alloc);
printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
printstat(s, break_cow);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
printstat(s, intr_cbr);
printstat(s, intr_tfh);
printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
printstat(s, call_os_offnode_reference);
printstat(s, call_os_check_for_bug);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
printstat(s, migrate_check);
printstat(s, migrated_retarget);
printstat(s, migrated_unload);
printstat(s, migrated_unload_delay);
printstat(s, migrated_nopfn_retarget);
printstat(s, migrated_nopfn_unload);
printstat(s, check_context_retarget_intr);
printstat(s, check_context_unload);
printstat(s, tlb_dropin);
printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
printstat(s, tlb_dropin_fail_no_exception_war);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
printstat(s, mmu_clear_flush_young);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
printstat(s, mesq_qf_not_full);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
printstat(s, mesq_noop_page_overflow);
return 0;
}
@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
"cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file,
static int options_show(struct seq_file *s, void *p)
{
seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
@ -183,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid",
"ctx#", "pid", "cbrs", "dsbytes", "mode");
seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
"ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n",
seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
ts->ts_tgid_owner,
is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
@ -355,7 +355,7 @@ static void delete_proc_files(void)
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
remove_proc_entry("gru", NULL);
remove_proc_entry("gru", proc_gru->parent);
}
}


@ -161,7 +161,7 @@ extern unsigned int gru_max_gids;
#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
#define GRU_DRIVER_VERSION_STR "0.80"
#define GRU_DRIVER_VERSION_STR "0.85"
/*
* GRU statistics.
@ -171,7 +171,8 @@ struct gru_stats_s {
atomic_long_t vdata_free;
atomic_long_t gts_alloc;
atomic_long_t gts_free;
atomic_long_t vdata_double_alloc;
atomic_long_t gms_alloc;
atomic_long_t gms_free;
atomic_long_t gts_double_allocate;
atomic_long_t assign_context;
atomic_long_t assign_context_failed;
@ -184,28 +185,25 @@ struct gru_stats_s {
atomic_long_t steal_kernel_context;
atomic_long_t steal_context_failed;
atomic_long_t nopfn;
atomic_long_t break_cow;
atomic_long_t asid_new;
atomic_long_t asid_next;
atomic_long_t asid_wrap;
atomic_long_t asid_reuse;
atomic_long_t intr;
atomic_long_t intr_cbr;
atomic_long_t intr_tfh;
atomic_long_t intr_spurious;
atomic_long_t intr_mm_lock_failed;
atomic_long_t call_os;
atomic_long_t call_os_offnode_reference;
atomic_long_t call_os_check_for_bug;
atomic_long_t call_os_wait_queue;
atomic_long_t user_flush_tlb;
atomic_long_t user_unload_context;
atomic_long_t user_exception;
atomic_long_t set_context_option;
atomic_long_t migrate_check;
atomic_long_t migrated_retarget;
atomic_long_t migrated_unload;
atomic_long_t migrated_unload_delay;
atomic_long_t migrated_nopfn_retarget;
atomic_long_t migrated_nopfn_unload;
atomic_long_t check_context_retarget_intr;
atomic_long_t check_context_unload;
atomic_long_t tlb_dropin;
atomic_long_t tlb_preload_page;
atomic_long_t tlb_dropin_fail_no_asid;
atomic_long_t tlb_dropin_fail_upm;
atomic_long_t tlb_dropin_fail_invalid;
@ -213,17 +211,16 @@ struct gru_stats_s {
atomic_long_t tlb_dropin_fail_idle;
atomic_long_t tlb_dropin_fail_fmm;
atomic_long_t tlb_dropin_fail_no_exception;
atomic_long_t tlb_dropin_fail_no_exception_war;
atomic_long_t tfh_stale_on_fault;
atomic_long_t mmu_invalidate_range;
atomic_long_t mmu_invalidate_page;
atomic_long_t mmu_clear_flush_young;
atomic_long_t flush_tlb;
atomic_long_t flush_tlb_gru;
atomic_long_t flush_tlb_gru_tgh;
atomic_long_t flush_tlb_gru_zero_asid;
atomic_long_t copy_gpa;
atomic_long_t read_gpa;
atomic_long_t mesq_receive;
atomic_long_t mesq_receive_none;
@ -235,7 +232,7 @@ struct gru_stats_s {
atomic_long_t mesq_send_qlimit_reached;
atomic_long_t mesq_send_amo_nacked;
atomic_long_t mesq_send_put_nacked;
atomic_long_t mesq_qf_not_full;
atomic_long_t mesq_page_overflow;
atomic_long_t mesq_qf_locked;
atomic_long_t mesq_qf_noop_not_full;
atomic_long_t mesq_qf_switch_head_failed;
@ -245,11 +242,13 @@ struct gru_stats_s {
atomic_long_t mesq_noop_qlimit_reached;
atomic_long_t mesq_noop_amo_nacked;
atomic_long_t mesq_noop_put_nacked;
atomic_long_t mesq_noop_page_overflow;
};
enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
cchop_deallocate, tghop_invalidate, mcsop_last};
cchop_deallocate, tfhop_write_only, tfhop_write_restart,
tghop_invalidate, mcsop_last};
struct mcs_op_statistic {
atomic_long_t count;
@ -259,8 +258,8 @@ struct mcs_op_statistic {
extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define OPT_DPRINT 1
#define OPT_STATS 2
#define OPT_DPRINT 1
#define OPT_STATS 2
#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define gru_dbg(dev, fmt, x...) \
do { \
if (gru_options & OPT_DPRINT) \
dev_dbg(dev, "%s: " fmt, __func__, x); \
printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
} while (0)
#else
#define gru_dbg(x...)
@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define ASID_INC 8 /* number of regions */
/* Generate a GRU asid value from a GRU base asid & a virtual address. */
#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
#elif defined CONFIG_X86_64
#define VADDR_HI_BIT 48
#else
#error "Unsupported architecture"
#endif
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
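GRUREGION shifts the address down so only its top three bits remain, masks to the low two of those, and GRUASID adds that region index to the context's base asid, so each address region maps to its own asid. A worked sketch; VADDR_HI_BIT = 64 is an assumption for illustration:

#include <stdio.h>

#define VADDR_HI_BIT	64	/* assumed; arch-dependent in the driver */
#define GRUREGION(addr)	((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))

int main(void)
{
	unsigned long addr = 0x4000000000000000UL;	/* bits 62:61 = 0b10 */

	/* prints: region 2, asid 10 */
	printf("region %lu, asid %lu\n", GRUREGION(addr), GRUASID(8UL, addr));
	return 0;
}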
@ -345,6 +338,7 @@ struct gru_vma_data {
long vd_user_options;/* misc user option flags */
int vd_cbr_au_count;
int vd_dsr_au_count;
unsigned char vd_tlb_preload_count;
};
/*
@ -360,6 +354,7 @@ struct gru_thread_state {
struct gru_state *ts_gru; /* GRU where the context is
loaded */
struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
unsigned char ts_tlb_preload_count; /* TLB preload pages */
unsigned long ts_cbr_map; /* map of allocated CBRs */
unsigned long ts_dsr_map; /* map of allocated DATA
resources */
@ -368,6 +363,8 @@ struct gru_thread_state {
long ts_user_options;/* misc user option flags */
pid_t ts_tgid_owner; /* task that is using the
context - for migration */
short ts_user_blade_id;/* user selected blade */
char ts_user_chiplet_id;/* user selected chiplet */
unsigned short ts_sizeavail; /* Pagesizes in use */
int ts_tsid; /* thread that owns the
structure */
@ -384,13 +381,11 @@ struct gru_thread_state {
char ts_blade; /* If >= 0, migrate context if
ref from different blade */
char ts_force_cch_reload;
char ts_force_unload;/* force context to be unloaded
after migration */
char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
allocated CB */
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
struct gts_statistics ustats; /* User statistics */
struct gru_gseg_statistics ustats; /* User statistics */
unsigned long ts_gdata[0]; /* save area for GRU data (CB,
DS, CBE) */
};
@ -422,6 +417,7 @@ struct gru_state {
gru segments (64) */
unsigned short gs_gid; /* unique GRU number */
unsigned short gs_blade_id; /* blade of GRU */
unsigned char gs_chiplet_id; /* blade chiplet of GRU */
unsigned char gs_tgh_local_shift; /* used to pick TGH for
local flush */
unsigned char gs_tgh_first_remote; /* starting TGH# for
@ -453,6 +449,7 @@ struct gru_state {
in use */
struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
the context */
int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
};
/*
@ -619,6 +616,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
return !gts->ts_mm;
}
/*
* The following are for Nehalem-EX. A more general scheme is needed for
* future processors.
*/
#define UV_MAX_INT_CORES 8
#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
((cpu_physical_id(p) >> 1) & 3))
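These macros decode the Nehalem-EX APIC id directly: bit 0 is the hyperthread, bits 2:1 plus bit 4 form the core index, and bit 5 the socket. A standalone sketch of the decode on a hypothetical id (the real macros call cpu_physical_id(p) first):

#include <stdio.h>

#define socket_of(id)	(((id) >> 5) & 1)
#define ht_of(id)	((id) & 1)
/* core index packs APIC bits 4, 2 and 1 */
#define core_of(id)	((((id) >> 2) & 4) | (((id) >> 1) & 3))

int main(void)
{
	int id = 0x2d;	/* hypothetical cpu_physical_id() value, 0b101101 */

	/* prints: socket 1 core 2 ht 1 */
	printf("socket %d core %d ht %d\n",
	       socket_of(id), core_of(id), ht_of(id));
	return 0;
}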
/*-----------------------------------------------------------------------------
* Function prototypes & externs
*/
@ -633,24 +639,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
*vma, int tsid);
extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
int blade);
extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
extern void gru_load_context(struct gru_thread_state *gts);
extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
extern void gru_steal_context(struct gru_thread_state *gts);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
extern int gru_update_cch(struct gru_thread_state *gts);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
extern irqreturn_t gru0_intr(int irq, void *dev_id);
extern irqreturn_t gru1_intr(int irq, void *dev_id);
extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
extern void gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
@ -658,7 +666,8 @@ extern int gru_proc_init(void);
extern void gru_proc_exit(void);
extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
int cbr_au_count, int dsr_au_count, int options, int tsid);
int cbr_au_count, int dsr_au_count,
unsigned char tlb_preload_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,


@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
STAT(flush_tlb_gru_tgh);
asid = GRUASID(asid, start);
gru_dbg(grudev,
" FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
gid, asid, num, asids->mt_ctxbitmap);
" FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
num - 1, asids->mt_ctxbitmap);
@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
{
struct gru_mm_struct *gms;
struct mmu_notifier *mn;
int err;
mn = mmu_find_ops(current->mm, &gru_mmuops);
if (mn) {
@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
} else {
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (gms) {
STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
gms->ms_notifier.ops = &gru_mmuops;
atomic_set(&gms->ms_refcnt, 1);
init_waitqueue_head(&gms->ms_wait_queue);
__mmu_notifier_register(&gms->ms_notifier, current->mm);
err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
if (err)
goto error;
}
}
gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
atomic_read(&gms->ms_refcnt));
return gms;
error:
kfree(gms);
return ERR_PTR(err);
}
void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
if (!gms->ms_released)
mmu_notifier_unregister(&gms->ms_notifier, current->mm);
kfree(gms);
STAT(gms_free);
}
}


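One consequence of the gru_register_mmu_notifier() hunk above: failures from __mmu_notifier_register() now come back as ERR_PTR(err), while a failed kzalloc() still yields NULL. A hedged sketch of what a caller has to handle; demo_attach() is hypothetical, not part of the patch:

#include <linux/err.h>

static int demo_attach(struct gru_mm_struct **gmsp)
{
	struct gru_mm_struct *gms = gru_register_mmu_notifier();

	if (!gms)			/* allocation failure */
		return -ENOMEM;
	if (IS_ERR(gms))		/* notifier registration failure */
		return PTR_ERR(gms);

	*gmsp = gms;
	return 0;
}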
@ -339,6 +339,7 @@ extern short xp_partition_id;
extern u8 xp_region_size;
extern unsigned long (*xp_pa) (void *);
extern unsigned long (*xp_socket_pa) (unsigned long);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);


@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
unsigned long (*xp_socket_pa) (unsigned long gpa);
EXPORT_SYMBOL_GPL(xp_socket_pa);
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);


@ -83,6 +83,15 @@ xp_pa_sn2(void *addr)
return __pa(addr);
}
/*
* Convert a global physical to a socket physical address.
*/
static unsigned long
xp_socket_pa_sn2(unsigned long gpa)
{
return gpa;
}
/*
* Wrapper for bte_copy().
*
@ -162,6 +171,7 @@ xp_init_sn2(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;


@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
return uv_gpa(addr);
}
/*
* Convert a global physical to socket physical address.
*/
static unsigned long
xp_socket_pa_uv(unsigned long gpa)
{
return uv_gpa_to_soc_phys_ram(gpa);
}
static enum xp_retval
xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
BUG_ON(len != 8);
ret = gru_read_gpa(dst_va, src_gpa);
if (ret == 0)
return xpSuccess;
dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
"len=%ld\n", dst_gpa, src_gpa, len);
return xpGruCopyError;
}
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
if (uv_gpa_in_mmr_space(src_gpa))
return xp_remote_mmr_read(dst_gpa, src_gpa, len);
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
@ -123,6 +155,7 @@ xp_init_uv(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;


@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/hardirq.h>
#include "xpc.h"
#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
if (L1_CACHE_ALIGN(len) > buf_len) {
kfree(buf_base);
if (is_shub())
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
if (buf_base != NULL)
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
rp = (struct xpc_rsvd_page *)__va(rp_pa);
rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */


@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
"ret=%d\n", ret);
return ret;
}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
#elif defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
"ret=%d\n", ret);
return ret;
}
#else
#error not a supported configuration
#endif
@ -185,12 +187,13 @@ static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
@ -204,6 +207,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
enum xp_retval xp_ret;
int ret;
int nid;
int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
@ -259,9 +263,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
goto out_5;
}
nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
nid, mmr_value->vector, mmr_value->dest);
nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
@ -946,11 +952,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
head->n_entries--;
BUG_ON(head->n_entries < 0);
first->next = NULL;
}
head->n_entries--;
BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
first->next = NULL;
return first;
}
@ -1019,7 +1027,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
(part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
@ -1422,7 +1431,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
@ -1646,8 +1654,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {


@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
mlx4_bitmap_free_range(bitmap, obj, 1);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
u32 start, u32 nbits,
int len, int align)
{
unsigned long end, i;
again:
start = ALIGN(start, align);
while ((start < nbits) && test_bit(start, bitmap))
start += align;
if (start >= nbits)
return -1;
end = start+len;
if (end > nbits)
return -1;
for (i = start + 1; i < end; i++) {
if (test_bit(i, bitmap)) {
start = i + 1;
goto again;
}
}
return start;
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
u32 obj, i;
@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
spin_lock(&bitmap->lock);
obj = find_aligned_range(bitmap->table, bitmap->last,
bitmap->max, cnt, align);
obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
bitmap->last, cnt, align - 1);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
obj = find_aligned_range(bitmap->table, 0, bitmap->max,
cnt, align);
obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
0, cnt, align - 1);
}
if (obj < bitmap->max) {


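The mlx4 hunk above replaces the driver-private find_aligned_range() with bitmap_find_next_zero_area(), one of the helpers this series introduces. A minimal sketch of the allocate-and-mark pattern, assuming the new bitmap API; note that align_mask is alignment minus one (alignment must be a power of two) and that a return value >= size means no fit was found:

#include <linux/bitmap.h>
#include <linux/errno.h>

static int demo_alloc_aligned(unsigned long *map, unsigned long size,
			      unsigned int count, unsigned int align)
{
	unsigned long obj;

	obj = bitmap_find_next_zero_area(map, size, 0, count, align - 1);
	if (obj >= size)
		return -ENOMEM;		/* >= size means "not found" */

	bitmap_set(map, obj, count);	/* mark the range busy */
	return obj;
}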
@ -3403,7 +3403,7 @@ static int __init parport_parse_param(const char *s, int *val,
*val = automatic;
else if (!strncmp(s, "none", 4))
*val = none;
else if (nofifo && !strncmp(s, "nofifo", 4))
else if (nofifo && !strncmp(s, "nofifo", 6))
*val = nofifo;
else {
char *ep;


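The parport one-byte fix above is a classic strncmp() length bug: comparing only 4 bytes of "nofifo" accepts any option string that merely starts with "nofi". A trivial userspace demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* With length 4 only "nofi" is compared, so a bogus option
	 * like "nofixyz" matches; length 6 compares the whole keyword. */
	printf("len 4: %d\n", strncmp("nofixyz", "nofifo", 4)); /* 0: false match */
	printf("len 6: %d\n", strncmp("nofixyz", "nofifo", 6)); /* nonzero */
	return 0;
}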
@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/pnp.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <asm/uaccess.h>
@ -33,42 +34,65 @@
static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
static int proc_read_pnpconfig(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static int pnpconfig_proc_show(struct seq_file *m, void *v)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
return snprintf(buf, count,
"structure_revision %d\n"
"number_of_CSNs %d\n"
"ISA_read_data_port 0x%x\n",
pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
seq_printf(m, "structure_revision %d\n"
"number_of_CSNs %d\n"
"ISA_read_data_port 0x%x\n",
pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
return 0;
}
static int proc_read_escdinfo(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static int pnpconfig_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, pnpconfig_proc_show, NULL);
}
static const struct file_operations pnpconfig_proc_fops = {
.owner = THIS_MODULE,
.open = pnpconfig_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int escd_info_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
return snprintf(buf, count,
"min_ESCD_write_size %d\n"
seq_printf(m, "min_ESCD_write_size %d\n"
"ESCD_size %d\n"
"NVRAM_base 0x%x\n",
escd.min_escd_write_size,
escd.escd_size, escd.nv_storage_base);
return 0;
}
static int escd_info_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, escd_info_proc_show, NULL);
}
static const struct file_operations escd_info_proc_fops = {
.owner = THIS_MODULE,
.open = escd_info_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#define MAX_SANE_ESCD_SIZE (32*1024)
static int proc_read_escd(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static int escd_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
char *tmpbuf;
int escd_size, escd_left_to_read, n;
int escd_size;
if (pnp_bios_escd_info(&escd))
return -EIO;
@ -76,7 +100,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR
"PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
"PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
return -EFBIG;
}
@ -94,56 +118,75 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by"
" BIOS read_escd call is too great\n");
printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
" BIOS read_escd call is too great\n", __func__);
kfree(tmpbuf);
return -EFBIG;
}
escd_left_to_read = escd_size - pos;
if (escd_left_to_read < 0)
escd_left_to_read = 0;
if (escd_left_to_read == 0)
*eof = 1;
n = min(count, escd_left_to_read);
memcpy(buf, tmpbuf + pos, n);
seq_write(m, tmpbuf, escd_size);
kfree(tmpbuf);
*start = buf;
return n;
return 0;
}
static int proc_read_legacyres(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static int escd_proc_open(struct inode *inode, struct file *file)
{
/* Assume that the following won't overflow the buffer */
if (pnp_bios_get_stat_res(buf))
return -EIO;
return count; // FIXME: Return actual length
return single_open(file, escd_proc_show, NULL);
}
static int proc_read_devices(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static const struct file_operations escd_proc_fops = {
.owner = THIS_MODULE,
.open = escd_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
{
void *buf;
buf = kmalloc(65536, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (pnp_bios_get_stat_res(buf)) {
kfree(buf);
return -EIO;
}
seq_write(m, buf, 65536);
kfree(buf);
return 0;
}
static int pnp_legacyres_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, pnp_legacyres_proc_show, NULL);
}
static const struct file_operations pnp_legacyres_proc_fops = {
.owner = THIS_MODULE,
.open = pnp_legacyres_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int pnp_devices_proc_show(struct seq_file *m, void *v)
{
struct pnp_bios_node *node;
u8 nodenum;
char *p = buf;
if (pos >= 0xff)
return 0;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
for (nodenum = pos; nodenum < 0xff;) {
for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
/* 26 = the number of characters per line sprintf'ed */
if ((p - buf + 26) > count)
break;
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
node->handle, node->eisa_id,
node->type_code[0], node->type_code[1],
node->type_code[2], node->flags);
@ -153,20 +196,29 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
"PnPBIOS: proc_read_devices:",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
*eof = 1;
break;
}
}
kfree(node);
if (nodenum == 0xff)
*eof = 1;
*start = (char *)((off_t) nodenum - pos);
return p - buf;
return 0;
}
static int proc_read_node(char *buf, char **start, off_t pos,
int count, int *eof, void *data)
static int pnp_devices_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, pnp_devices_proc_show, NULL);
}
static const struct file_operations pnp_devices_proc_fops = {
.owner = THIS_MODULE,
.open = pnp_devices_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int pnpbios_proc_show(struct seq_file *m, void *v)
{
void *data = m->private;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@ -180,14 +232,20 @@ static int proc_read_node(char *buf, char **start, off_t pos,
return -EIO;
}
len = node->size - sizeof(struct pnp_bios_node);
memcpy(buf, node->data, len);
seq_write(m, node->data, len);
kfree(node);
return len;
return 0;
}
static int proc_write_node(struct file *file, const char __user * buf,
unsigned long count, void *data)
static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, pnpbios_proc_show, PDE(inode)->data);
}
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
void *data = PDE(file->f_path.dentry->d_inode)->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@ -218,34 +276,33 @@ static int proc_write_node(struct file *file, const char __user * buf,
return ret;
}
static const struct file_operations pnpbios_proc_fops = {
.owner = THIS_MODULE,
.open = pnpbios_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = pnpbios_proc_write,
};
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
struct proc_dir_entry *ent;
sprintf(name, "%02x", node->handle);
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
ent = create_proc_entry(name, 0, proc_pnp);
if (ent) {
ent->read_proc = proc_read_node;
ent->write_proc = proc_write_node;
ent->data = (void *)(long)(node->handle);
}
proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
(void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
ent = create_proc_entry(name, 0, proc_pnp_boot);
if (ent) {
ent->read_proc = proc_read_node;
ent->write_proc = proc_write_node;
ent->data = (void *)(long)(node->handle + 0x100);
if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
(void *)(long)(node->handle + 0x100)))
return 0;
}
return -EIO;
}
@ -262,14 +319,11 @@ int __init pnpbios_proc_init(void)
proc_pnp_boot = proc_mkdir("boot", proc_pnp);
if (!proc_pnp_boot)
return -EIO;
create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
create_proc_read_entry("configuration_info", 0, proc_pnp,
proc_read_pnpconfig, NULL);
create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo,
NULL);
create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
proc_read_legacyres, NULL);
proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops);
proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops);
proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops);
proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops);
proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops);
return 0;
}


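The entire pnpbios proc.c diff above is one repeated conversion: legacy read_proc/create_proc_read_entry handlers become seq_file show functions behind single_open(). A self-contained sketch of that pattern as a minimal module, with "demo" names invented for illustration:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value %d\n", 42);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo", 0, NULL, &demo_proc_fops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");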
@ -242,6 +242,15 @@ config RTC_DRV_M41T80_WDT
If you say Y here you will get support for the
watchdog timer in the ST M41T60 and M41T80 RTC chips series.
config RTC_DRV_BQ32K
tristate "TI BQ32000"
help
If you say Y here you will get support for the TI
BQ32000 I2C RTC chip.
This driver can also be built as a module. If so, the module
will be called rtc-bq32k.
config RTC_DRV_DM355EVM
tristate "TI DaVinci DM355 EVM RTC"
depends on MFD_DM355EVM_MSP
@ -592,15 +601,22 @@ config RTC_DRV_AB3100
Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
config RTC_DRV_NUC900
tristate "NUC910/NUC920 RTC driver"
depends on RTC_CLASS && ARCH_W90X900
help
If you say yes here you get support for the RTC subsystem of the
NUC910/NUC920 used in embedded systems.
comment "on-CPU RTC drivers"
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
help
Say "yes" here to support the real time clock on TI OMAP1 chips.
This driver can also be built as a module called rtc-omap.
Say "yes" here to support the real time clock on TI OMAP1 and
DA8xx/OMAP-L13x chips. This driver can also be built as a
module called rtc-omap.
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
@ -846,4 +862,10 @@ config RTC_DRV_PCAP
If you say Y here you will get support for the RTC found on
the PCAP2 ASIC used on some Motorola phones.
config RTC_DRV_MC13783
depends on MFD_MC13783
tristate "Freescale MC13783 RTC"
help
This enables support for the Freescale MC13783 PMIC RTC
endif # RTC_CLASS


@ -23,6 +23,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
@ -52,8 +53,10 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o


@ -256,6 +256,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_iounmap;
}
platform_set_drvdata(pdev, rtc);
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&at32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
@ -264,7 +266,6 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_free_irq;
}
platform_set_drvdata(pdev, rtc);
device_init_wakeup(&pdev->dev, 1);
dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
@ -273,6 +274,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
return 0;
out_free_irq:
platform_set_drvdata(pdev, NULL);
free_irq(irq, rtc);
out_iounmap:
iounmap(rtc->regs);

drivers/rtc/rtc-bq32k.c (new file, +204 lines)

@ -0,0 +1,204 @@
/*
* Driver for TI BQ32000 RTC.
*
* Copyright (C) 2009 Semihalf.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/bcd.h>
#define BQ32K_SECONDS 0x00 /* Seconds register address */
#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
#define BQ32K_MINUTES 0x01 /* Minutes register address */
#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
#define BQ32K_OF 0x80 /* Oscillator Failure flag */
#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
#define BQ32K_CENT 0x40 /* Century flag */
#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
struct bq32k_regs {
uint8_t seconds;
uint8_t minutes;
uint8_t cent_hours;
uint8_t day;
uint8_t date;
uint8_t month;
uint8_t years;
};
static struct i2c_driver bq32k_driver;
static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
{
struct i2c_client *client = to_i2c_client(dev);
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0,
.len = 1,
.buf = &off,
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = len,
.buf = data,
}
};
if (i2c_transfer(client->adapter, msgs, 2) == 2)
return 0;
return -EIO;
}
static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
{
struct i2c_client *client = to_i2c_client(dev);
uint8_t buffer[len + 1];
buffer[0] = off;
memcpy(&buffer[1], data, len);
if (i2c_master_send(client, buffer, len + 1) == len + 1)
return 0;
return -EIO;
}
static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct bq32k_regs regs;
int error;
error = bq32k_read(dev, &regs, 0, sizeof(regs));
if (error)
return error;
tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
tm->tm_mday = bcd2bin(regs.date);
tm->tm_wday = bcd2bin(regs.day) - 1;
tm->tm_mon = bcd2bin(regs.month) - 1;
tm->tm_year = bcd2bin(regs.years) +
((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
return rtc_valid_tm(tm);
}
static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct bq32k_regs regs;
regs.seconds = bin2bcd(tm->tm_sec);
regs.minutes = bin2bcd(tm->tm_min);
regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
regs.day = bin2bcd(tm->tm_wday + 1);
regs.date = bin2bcd(tm->tm_mday);
regs.month = bin2bcd(tm->tm_mon + 1);
if (tm->tm_year >= 100) {
regs.cent_hours |= BQ32K_CENT;
regs.years = bin2bcd(tm->tm_year - 100);
} else
regs.years = bin2bcd(tm->tm_year);
return bq32k_write(dev, &regs, 0, sizeof(regs));
}
static const struct rtc_class_ops bq32k_rtc_ops = {
.read_time = bq32k_rtc_read_time,
.set_time = bq32k_rtc_set_time,
};
static int bq32k_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct rtc_device *rtc;
uint8_t reg;
int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
/* Check Oscillator Stop flag */
error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
if (!error && (reg & BQ32K_STOP)) {
dev_warn(dev, "Oscillator was halted. Restarting...\n");
reg &= ~BQ32K_STOP;
error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
}
if (error)
return error;
/* Check Oscillator Failure flag */
error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
if (!error && (reg & BQ32K_OF)) {
dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
reg &= ~BQ32K_OF;
error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
}
if (error)
return error;
rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
&bq32k_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
i2c_set_clientdata(client, rtc);
return 0;
}
static int __devexit bq32k_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
rtc_device_unregister(rtc);
return 0;
}
static const struct i2c_device_id bq32k_id[] = {
{ "bq32000", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, bq32k_id);
static struct i2c_driver bq32k_driver = {
.driver = {
.name = "bq32k",
.owner = THIS_MODULE,
},
.probe = bq32k_probe,
.remove = __devexit_p(bq32k_remove),
.id_table = bq32k_id,
};
static __init int bq32k_init(void)
{
return i2c_add_driver(&bq32k_driver);
}
module_init(bq32k_init);
static __exit void bq32k_exit(void)
{
i2c_del_driver(&bq32k_driver);
}
module_exit(bq32k_exit);
MODULE_AUTHOR("Semihalf, Piotr Ziecik <kosmo@semihalf.com>");
MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
MODULE_LICENSE("GPL");


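A quick way to exercise a newly added RTC driver like rtc-bq32k above is the standard rtc-dev character device. A userspace sketch, assuming the chip ends up registered as /dev/rtc0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}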
@ -169,6 +169,8 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_free;
}
platform_set_drvdata(pdev, p);
p->rtc = rtc_device_register("bq4802", &pdev->dev,
&bq4802_ops, THIS_MODULE);
if (IS_ERR(p->rtc)) {
@ -176,7 +178,6 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_iounmap;
}
platform_set_drvdata(pdev, p);
err = 0;
out:
return err;


@ -420,49 +420,43 @@ static int cmos_irq_set_state(struct device *dev, int enabled)
return 0;
}
#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
static int
cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
switch (cmd) {
case RTC_AIE_OFF:
case RTC_AIE_ON:
case RTC_UIE_OFF:
case RTC_UIE_ON:
if (!is_valid_irq(cmos->irq))
return -EINVAL;
break;
/* PIE ON/OFF is handled by cmos_irq_set_state() */
default:
return -ENOIOCTLCMD;
}
if (!is_valid_irq(cmos->irq))
return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
switch (cmd) {
case RTC_AIE_OFF: /* alarm off */
cmos_irq_disable(cmos, RTC_AIE);
break;
case RTC_AIE_ON: /* alarm on */
if (enabled)
cmos_irq_enable(cmos, RTC_AIE);
break;
case RTC_UIE_OFF: /* update off */
cmos_irq_disable(cmos, RTC_UIE);
break;
case RTC_UIE_ON: /* update on */
cmos_irq_enable(cmos, RTC_UIE);
break;
}
else
cmos_irq_disable(cmos, RTC_AIE);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
#else
#define cmos_rtc_ioctl NULL
#endif
static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
if (!is_valid_irq(cmos->irq))
return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
cmos_irq_enable(cmos, RTC_UIE);
else
cmos_irq_disable(cmos, RTC_UIE);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
@ -503,14 +497,15 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
#endif
static const struct rtc_class_ops cmos_rtc_ops = {
.ioctl = cmos_rtc_ioctl,
.read_time = cmos_read_time,
.set_time = cmos_set_time,
.read_alarm = cmos_read_alarm,
.set_alarm = cmos_set_alarm,
.proc = cmos_procfs,
.irq_set_freq = cmos_irq_set_freq,
.irq_set_state = cmos_irq_set_state,
.read_time = cmos_read_time,
.set_time = cmos_set_time,
.read_alarm = cmos_read_alarm,
.set_alarm = cmos_set_alarm,
.proc = cmos_procfs,
.irq_set_freq = cmos_irq_set_freq,
.irq_set_state = cmos_irq_set_state,
.alarm_irq_enable = cmos_alarm_irq_enable,
.update_irq_enable = cmos_update_irq_enable,
};
/*----------------------------------------------------------------*/
@ -871,8 +866,9 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
hpet_mask_rtc_irq_bit(mask);
/* shut down hpet emulation - we don't need it for alarm */
hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);


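The rtc-cmos diff above shows the direction for the RTC drivers in this series: the catch-all ioctl handler is dropped and the RTC core drives two dedicated rtc_class_ops hooks instead of routing RTC_AIE_ON/OFF and RTC_UIE_ON/OFF through the driver. The skeleton looks like this; the demo_* bodies are placeholders, not the cmos implementation:

#include <linux/rtc.h>

static int demo_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	/* enable or disable the alarm interrupt in hardware */
	return 0;
}

static int demo_update_irq_enable(struct device *dev, unsigned int enabled)
{
	/* enable or disable the once-per-second update interrupt */
	return 0;
}

static const struct rtc_class_ops demo_rtc_ops = {
	.alarm_irq_enable  = demo_alarm_irq_enable,
	.update_irq_enable = demo_update_irq_enable,
};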
@ -143,7 +143,6 @@ static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
#ifdef RTC_SET_CHARGE
case RTC_SET_CHARGE:
{
struct ds1302_rtc *rtc = dev_get_drvdata(dev);
int tcs_val;
if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))


@ -617,7 +617,6 @@ static struct bin_attribute nvram = {
static int __devinit ds1305_probe(struct spi_device *spi)
{
struct ds1305 *ds1305;
struct rtc_device *rtc;
int status;
u8 addr, value;
struct ds1305_platform_data *pdata = spi->dev.platform_data;
@ -756,14 +755,13 @@ static int __devinit ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
rtc = rtc_device_register("ds1305", &spi->dev,
ds1305->rtc = rtc_device_register("ds1305", &spi->dev,
&ds1305_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
status = PTR_ERR(rtc);
if (IS_ERR(ds1305->rtc)) {
status = PTR_ERR(ds1305->rtc);
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
goto fail0;
}
ds1305->rtc = rtc;
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
@ -774,7 +772,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
if (spi->irq) {
INIT_WORK(&ds1305->work, ds1305_work);
status = request_irq(spi->irq, ds1305_irq,
0, dev_name(&rtc->dev), ds1305);
0, dev_name(&ds1305->rtc->dev), ds1305);
if (status < 0) {
dev_dbg(&spi->dev, "request_irq %d --> %d\n",
spi->irq, status);
@ -794,7 +792,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
fail2:
free_irq(spi->irq, ds1305);
fail1:
rtc_device_unregister(rtc);
rtc_device_unregister(ds1305->rtc);
fail0:
kfree(ds1305);
return status;
@ -802,7 +800,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
static int __devexit ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
struct ds1305 *ds1305 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &nvram);


@ -874,7 +874,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
}
if (want_irq) {
err = request_irq(client->irq, ds1307_irq, 0,
err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
dev_err(&client->dev,


@ -87,7 +87,6 @@ enum ds1511reg {
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
unsigned long baseaddr; /* physical base address */
int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
@ -95,6 +94,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
spinlock_t lock;
};
static DEFINE_SPINLOCK(ds1511_lock);
@ -302,7 +302,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
{
unsigned long flags;
spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
spin_lock_irqsave(&pdata->lock, flags);
rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
RTC_ALARM_DATE);
@ -317,7 +317,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
RTC_ALARM_SEC);
rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
rtc_read(RTC_CMD1); /* clear interrupts */
spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
spin_unlock_irqrestore(&pdata->lock, flags);
}
static int
@ -362,61 +362,63 @@ ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
unsigned long events = RTC_IRQF;
unsigned long events = 0;
spin_lock(&pdata->lock);
/*
* read and clear interrupt
*/
if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
return IRQ_NONE;
if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
events = RTC_IRQF;
if (rtc_read(RTC_ALARM_SEC) & 0x80)
events |= RTC_UF;
else
events |= RTC_AF;
if (likely(pdata->rtc))
rtc_update_irq(pdata->rtc, 1, events);
}
if (rtc_read(RTC_ALARM_SEC) & 0x80) {
events |= RTC_UF;
} else {
events |= RTC_AF;
}
rtc_update_irq(pdata->rtc, 1, events);
return IRQ_HANDLED;
spin_unlock(&pdata->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
}
static int
ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0) {
return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
}
switch (cmd) {
case RTC_AIE_OFF:
pdata->irqen &= ~RTC_AF;
ds1511_rtc_update_alarm(pdata);
break;
case RTC_AIE_ON:
if (pdata->irq <= 0)
return -EINVAL;
if (enabled)
pdata->irqen |= RTC_AF;
ds1511_rtc_update_alarm(pdata);
break;
case RTC_UIE_OFF:
pdata->irqen &= ~RTC_UF;
ds1511_rtc_update_alarm(pdata);
break;
case RTC_UIE_ON:
else
pdata->irqen &= ~RTC_AF;
ds1511_rtc_update_alarm(pdata);
return 0;
}
static int ds1511_rtc_update_irq_enable(struct device *dev,
unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
return -EINVAL;
if (enabled)
pdata->irqen |= RTC_UF;
ds1511_rtc_update_alarm(pdata);
break;
default:
return -ENOIOCTLCMD;
}
else
pdata->irqen &= ~RTC_UF;
ds1511_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1511_rtc_ops = {
.read_time = ds1511_rtc_read_time,
.set_time = ds1511_rtc_set_time,
.read_alarm = ds1511_rtc_read_alarm,
.set_alarm = ds1511_rtc_set_alarm,
.ioctl = ds1511_rtc_ioctl,
.read_time = ds1511_rtc_read_time,
.set_time = ds1511_rtc_set_time,
.read_alarm = ds1511_rtc_read_alarm,
.set_alarm = ds1511_rtc_set_alarm,
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
.update_irq_enable = ds1511_rtc_update_irq_enable,
};
static ssize_t
@ -492,29 +494,23 @@ ds1511_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
struct rtc_plat_data *pdata = NULL;
struct rtc_plat_data *pdata;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
return -ENODEV;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
}
pdata->size = res->end - res->start + 1;
if (!request_mem_region(res->start, pdata->size, pdev->name)) {
ret = -EBUSY;
goto out;
}
pdata->baseaddr = res->start;
pdata->size = pdata->size;
ds1511_base = ioremap(pdata->baseaddr, pdata->size);
if (!ds1511_base) {
ret = -ENOMEM;
goto out;
}
if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
pdev->name))
return -EBUSY;
ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
if (!ds1511_base)
return -ENOMEM;
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
@ -540,13 +536,15 @@ ds1511_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "voltage-low detected.\n");
}
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
if (pdata->irq > 0) {
rtc_read(RTC_CMD1);
if (request_irq(pdata->irq, ds1511_interrupt,
if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
@ -556,33 +554,13 @@ ds1511_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
goto out;
}
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pdata->rtc = rtc;
platform_set_drvdata(pdev, pdata);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
if (ret) {
goto out;
}
return 0;
out:
if (pdata->rtc) {
rtc_device_unregister(pdata->rtc);
}
if (pdata->irq > 0) {
free_irq(pdata->irq, pdev);
}
if (ds1511_base) {
iounmap(ds1511_base);
ds1511_base = NULL;
}
if (pdata->baseaddr) {
release_mem_region(pdata->baseaddr, pdata->size);
}
kfree(pdata);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
if (ret)
rtc_device_unregister(pdata->rtc);
return ret;
}
@ -593,19 +571,13 @@ ds1511_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
rtc_device_unregister(pdata->rtc);
pdata->rtc = NULL;
if (pdata->irq > 0) {
/*
* disable the alarm interrupt
*/
rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
rtc_read(RTC_CMD1);
free_irq(pdata->irq, pdev);
}
iounmap(pdata->ioaddr);
ds1511_base = NULL;
release_mem_region(pdata->baseaddr, pdata->size);
kfree(pdata);
return 0;
}


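The ds1511 handler rewrite above, mirrored by ds1553 below, settles on one pattern: guard the register access with the driver's own spinlock rather than rtc->irq_lock, and return IRQ_NONE when the chip was not the interrupt source, which is what makes a shared line safe. A condensed sketch with hypothetical demo_* names; the flag read stands in for the chip-specific status register:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>

struct demo_priv {
	struct rtc_device *rtc;
	spinlock_t lock;
	void __iomem *ioaddr;
};

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;
	unsigned long events = 0;

	spin_lock(&priv->lock);
	/* stand-in for the chip-specific "did we interrupt?" flag read */
	if (readb(priv->ioaddr) & 0x80) {
		events = RTC_IRQF | RTC_AF;
		if (likely(priv->rtc))
			rtc_update_irq(priv->rtc, 1, events);
	}
	spin_unlock(&priv->lock);

	return events ? IRQ_HANDLED : IRQ_NONE;
}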
@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#define DRV_VERSION "0.2"
#define DRV_VERSION "0.3"
#define RTC_REG_SIZE 0x2000
#define RTC_OFFSET 0x1ff0
@ -61,7 +61,6 @@
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
resource_size_t baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
@ -69,6 +68,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
spinlock_t lock;
};
static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
@ -139,7 +139,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
void __iomem *ioaddr = pdata->ioaddr;
unsigned long flags;
spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
spin_lock_irqsave(&pdata->lock, flags);
writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday),
ioaddr + RTC_DATE_ALARM);
@ -154,7 +154,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
ioaddr + RTC_SECONDS_ALARM);
writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
spin_unlock_irqrestore(&pdata->lock, flags);
}
static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@ -194,64 +194,69 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
unsigned long events = RTC_IRQF;
unsigned long events = 0;
spin_lock(&pdata->lock);
/* read and clear interrupt */
if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
return IRQ_NONE;
if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
events |= RTC_UF;
else
events |= RTC_AF;
rtc_update_irq(pdata->rtc, 1, events);
return IRQ_HANDLED;
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
events = RTC_IRQF;
if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
events |= RTC_UF;
else
events |= RTC_AF;
if (likely(pdata->rtc))
rtc_update_irq(pdata->rtc, 1, events);
}
spin_unlock(&pdata->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
}
static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
switch (cmd) {
case RTC_AIE_OFF:
pdata->irqen &= ~RTC_AF;
ds1553_rtc_update_alarm(pdata);
break;
case RTC_AIE_ON:
return -EINVAL;
if (enabled)
pdata->irqen |= RTC_AF;
ds1553_rtc_update_alarm(pdata);
break;
case RTC_UIE_OFF:
pdata->irqen &= ~RTC_UF;
ds1553_rtc_update_alarm(pdata);
break;
case RTC_UIE_ON:
else
pdata->irqen &= ~RTC_AF;
ds1553_rtc_update_alarm(pdata);
return 0;
}
static int ds1553_rtc_update_irq_enable(struct device *dev,
unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
return -EINVAL;
if (enabled)
pdata->irqen |= RTC_UF;
ds1553_rtc_update_alarm(pdata);
break;
default:
return -ENOIOCTLCMD;
}
else
pdata->irqen &= ~RTC_UF;
ds1553_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1553_rtc_ops = {
.read_time = ds1553_rtc_read_time,
.set_time = ds1553_rtc_set_time,
.read_alarm = ds1553_rtc_read_alarm,
.set_alarm = ds1553_rtc_set_alarm,
.ioctl = ds1553_rtc_ioctl,
.read_time = ds1553_rtc_read_time,
.set_time = ds1553_rtc_set_time,
.read_alarm = ds1553_rtc_read_alarm,
.set_alarm = ds1553_rtc_set_alarm,
.alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
.update_irq_enable = ds1553_rtc_update_irq_enable,
};
static ssize_t ds1553_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@ -265,8 +270,8 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@ -291,26 +296,23 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata = NULL;
void __iomem *ioaddr = NULL;
struct rtc_plat_data *pdata;
void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
ret = -EBUSY;
goto out;
}
pdata->baseaddr = res->start;
ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
if (!ioaddr) {
ret = -ENOMEM;
goto out;
}
if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
pdev->name))
return -EBUSY;
ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
if (!ioaddr)
return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
@ -326,9 +328,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
spin_lock_init(&pdata->lock);
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
if (request_irq(pdata->irq, ds1553_rtc_interrupt,
if (devm_request_irq(&pdev->dev, pdata->irq,
ds1553_rtc_interrupt,
IRQF_DISABLED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = 0;
@ -337,27 +343,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1553_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
goto out;
}
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pdata->rtc = rtc;
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
if (ret)
goto out;
return 0;
out:
if (pdata->rtc)
rtc_device_unregister(pdata->rtc);
if (pdata->irq > 0)
free_irq(pdata->irq, pdev);
if (ioaddr)
iounmap(ioaddr);
if (pdata->baseaddr)
release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
kfree(pdata);
rtc_device_unregister(rtc);
return ret;
}
@ -367,13 +359,8 @@ static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
rtc_device_unregister(pdata->rtc);
if (pdata->irq > 0) {
if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
free_irq(pdata->irq, pdev);
}
iounmap(pdata->ioaddr);
release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
kfree(pdata);
return 0;
}


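The ds1511 and ds1553 probes above also convert to managed (devm_*) resources, which is why their error-unwind ladders and most of their remove paths disappear: everything allocated through the device is released automatically on probe failure or unbind. A condensed sketch of the resulting probe shape, assuming a single MMIO resource; the demo_* names are invented:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

struct demo_priv {
	void __iomem *base;
};

static int __devinit demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct demo_priv *priv;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!priv->base)
		return -ENOMEM;	/* everything above is freed automatically */

	platform_set_drvdata(pdev, priv);
	return 0;
}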
@ -21,7 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#define DRV_VERSION "0.3"
#define DRV_VERSION "0.4"
#define RTC_SIZE 8
@ -55,7 +55,6 @@ struct rtc_plat_data {
void __iomem *ioaddr_rtc;
size_t size_nvram;
size_t size;
resource_size_t baseaddr;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
@ -132,8 +131,8 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@ -147,8 +146,8 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@ -163,27 +162,24 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata = NULL;
void __iomem *ioaddr = NULL;
struct rtc_plat_data *pdata;
void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->size = res->end - res->start + 1;
if (!request_mem_region(res->start, pdata->size, pdev->name)) {
ret = -EBUSY;
goto out;
}
pdata->baseaddr = res->start;
ioaddr = ioremap(pdata->baseaddr, pdata->size);
if (!ioaddr) {
ret = -ENOMEM;
goto out;
}
if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
pdev->name))
return -EBUSY;
ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
if (!ioaddr)
return -ENOMEM;
pdata->ioaddr_nvram = ioaddr;
pdata->size_nvram = pdata->size - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
@ -207,31 +203,19 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG))
dev_warn(&pdev->dev, "voltage-low detected.\n");
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1742_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
goto out;
}
pdata->rtc = rtc;
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1742_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
pdata->rtc = rtc;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
if (ret) {
dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
goto out;
rtc_device_unregister(rtc);
}
return 0;
out:
if (pdata->rtc)
rtc_device_unregister(pdata->rtc);
if (pdata->ioaddr_nvram)
iounmap(pdata->ioaddr_nvram);
if (pdata->baseaddr)
release_mem_region(pdata->baseaddr, pdata->size);
kfree(pdata);
return ret;
}
@ -241,9 +225,6 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
rtc_device_unregister(pdata->rtc);
iounmap(pdata->ioaddr_nvram);
release_mem_region(pdata->baseaddr, pdata->size);
kfree(pdata);
return 0;
}


@ -142,7 +142,6 @@ static const struct rtc_class_ops m48t35_ops = {
static int __devinit m48t35_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
struct m48t35_priv *priv;
int ret = 0;
@ -171,20 +170,21 @@ static int __devinit m48t35_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out;
}
spin_lock_init(&priv->lock);
rtc = rtc_device_register("m48t35", &pdev->dev,
platform_set_drvdata(pdev, priv);
priv->rtc = rtc_device_register("m48t35", &pdev->dev,
&m48t35_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
if (IS_ERR(priv->rtc)) {
ret = PTR_ERR(priv->rtc);
goto out;
}
priv->rtc = rtc;
platform_set_drvdata(pdev, priv);
return 0;
out:
if (priv->rtc)
rtc_device_unregister(priv->rtc);
if (priv->reg)
iounmap(priv->reg);
if (priv->baseaddr)

Some files were not shown because too many files have changed in this diff.