Merge remote-tracking branches 'asoc/topic/intel', 'asoc/topic/kirkwood', 'asoc/topic/lm49453', 'asoc/topic/max9768' and 'asoc/topic/max98088' into asoc-next

Mark Brown 2015-08-30 15:54:38 +01:00
31 changed files with 5464 additions and 350 deletions

View file

@ -188,7 +188,6 @@ static struct reg_default lm49453_reg_defs[] = {
/* codec private data */
struct lm49453_priv {
struct regmap *regmap;
int fs_rate;
};
/* capture path controls */
@ -1112,13 +1111,10 @@ static int lm49453_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
u16 clk_div = 0;
lm49453->fs_rate = params_rate(params);
/* Setting DAC clock dividers based on substream sample rate. */
switch (lm49453->fs_rate) {
switch (params_rate(params)) {
case 8000:
case 16000:
case 32000:

View file

@ -43,8 +43,8 @@ static struct reg_default max9768_default_regs[] = {
static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
int val = gpio_get_value_cansleep(max9768->mute_gpio);
ucontrol->value.integer.value[0] = !val;
@ -55,8 +55,8 @@ static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]);
@ -130,19 +130,20 @@ static const struct snd_soc_dapm_route max9768_dapm_routes[] = {
{ "OUT-", NULL, "IN" },
};
static int max9768_probe(struct snd_soc_codec *codec)
static int max9768_probe(struct snd_soc_component *component)
{
struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
struct max9768 *max9768 = snd_soc_component_get_drvdata(component);
int ret;
if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) {
ret = snd_soc_write(codec, MAX9768_CTRL, MAX9768_CTRL_PWM);
ret = regmap_write(max9768->regmap, MAX9768_CTRL,
MAX9768_CTRL_PWM);
if (ret)
return ret;
}
if (gpio_is_valid(max9768->mute_gpio)) {
ret = snd_soc_add_codec_controls(codec, max9768_mute,
ret = snd_soc_add_component_controls(component, max9768_mute,
ARRAY_SIZE(max9768_mute));
if (ret)
return ret;
@ -151,7 +152,7 @@ static int max9768_probe(struct snd_soc_codec *codec)
return 0;
}
static struct snd_soc_codec_driver max9768_codec_driver = {
static struct snd_soc_component_driver max9768_component_driver = {
.probe = max9768_probe,
.controls = max9768_volume,
.num_controls = ARRAY_SIZE(max9768_volume),
@ -183,11 +184,13 @@ static int max9768_i2c_probe(struct i2c_client *client,
if (pdata) {
/* Mute on powerup to avoid clicks */
err = gpio_request_one(pdata->mute_gpio, GPIOF_INIT_HIGH, "MAX9768 Mute");
err = devm_gpio_request_one(&client->dev, pdata->mute_gpio,
GPIOF_INIT_HIGH, "MAX9768 Mute");
max9768->mute_gpio = err ?: pdata->mute_gpio;
/* Activate chip by releasing shutdown, enables I2C */
err = gpio_request_one(pdata->shdn_gpio, GPIOF_INIT_HIGH, "MAX9768 Shutdown");
err = devm_gpio_request_one(&client->dev, pdata->shdn_gpio,
GPIOF_INIT_HIGH, "MAX9768 Shutdown");
max9768->shdn_gpio = err ?: pdata->shdn_gpio;
max9768->flags = pdata->flags;
@ -199,38 +202,11 @@ static int max9768_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, max9768);
max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
if (IS_ERR(max9768->regmap)) {
err = PTR_ERR(max9768->regmap);
goto err_gpio_free;
}
if (IS_ERR(max9768->regmap))
return PTR_ERR(max9768->regmap);
err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
if (err)
goto err_gpio_free;
return 0;
err_gpio_free:
if (gpio_is_valid(max9768->shdn_gpio))
gpio_free(max9768->shdn_gpio);
if (gpio_is_valid(max9768->mute_gpio))
gpio_free(max9768->mute_gpio);
return err;
}
static int max9768_i2c_remove(struct i2c_client *client)
{
struct max9768 *max9768 = i2c_get_clientdata(client);
snd_soc_unregister_codec(&client->dev);
if (gpio_is_valid(max9768->shdn_gpio))
gpio_free(max9768->shdn_gpio);
if (gpio_is_valid(max9768->mute_gpio))
gpio_free(max9768->mute_gpio);
return 0;
return devm_snd_soc_register_component(&client->dev,
&max9768_component_driver, NULL, 0);
}
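The devm_* conversion shown above is what allows the remove callback to be dropped below; a minimal sketch of the resulting probe pattern (example_regmap_config and example_component_driver are placeholder names, not identifiers from this commit, and the driver's usual includes are assumed):

static int example_i2c_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	struct regmap *map;

	/* resources tied to &client->dev are released automatically on unbind */
	map = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* device-managed registration: no explicit unregister needed */
	return devm_snd_soc_register_component(&client->dev,
					       &example_component_driver,
					       NULL, 0);
}

Because both allocations are device-managed, unbinding the device unwinds them in reverse order, which is why max9768_i2c_remove can be deleted further down.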
static const struct i2c_device_id max9768_i2c_id[] = {
@ -244,7 +220,6 @@ static struct i2c_driver max9768_i2c_driver = {
.name = "max9768",
},
.probe = max9768_i2c_probe,
.remove = max9768_i2c_remove,
.id_table = max9768_i2c_id,
};
module_i2c_driver(max9768_i2c_driver);

View file

@ -258,292 +258,36 @@ static const struct reg_default max98088_reg[] = {
{ 0xc9, 0x00 }, /* C9 DAI2 biquad */
};
static struct {
int readable;
int writable;
int vol;
} max98088_access[M98088_REG_CNT] = {
{ 0xFF, 0xFF, 1 }, /* 00 IRQ status */
{ 0xFF, 0x00, 1 }, /* 01 MIC status */
{ 0xFF, 0x00, 1 }, /* 02 jack status */
{ 0x1F, 0x1F, 1 }, /* 03 battery voltage */
{ 0xFF, 0xFF, 0 }, /* 04 */
{ 0xFF, 0xFF, 0 }, /* 05 */
{ 0xFF, 0xFF, 0 }, /* 06 */
{ 0xFF, 0xFF, 0 }, /* 07 */
{ 0xFF, 0xFF, 0 }, /* 08 */
{ 0xFF, 0xFF, 0 }, /* 09 */
{ 0xFF, 0xFF, 0 }, /* 0A */
{ 0xFF, 0xFF, 0 }, /* 0B */
{ 0xFF, 0xFF, 0 }, /* 0C */
{ 0xFF, 0xFF, 0 }, /* 0D */
{ 0xFF, 0xFF, 0 }, /* 0E */
{ 0xFF, 0xFF, 0 }, /* 0F interrupt enable */
{ 0xFF, 0xFF, 0 }, /* 10 master clock */
{ 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */
{ 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */
{ 0xFF, 0xFF, 0 }, /* 13 DAI1 clock control */
{ 0xFF, 0xFF, 0 }, /* 14 DAI1 format */
{ 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */
{ 0xFF, 0xFF, 0 }, /* 16 DAI1 config */
{ 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */
{ 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */
{ 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */
{ 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */
{ 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */
{ 0xFF, 0xFF, 0 }, /* 1C DAI2 format */
{ 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */
{ 0xFF, 0xFF, 0 }, /* 1E DAI2 config */
{ 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */
{ 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */
{ 0xFF, 0xFF, 0 }, /* 21 data config */
{ 0xFF, 0xFF, 0 }, /* 22 DAC mixer */
{ 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */
{ 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */
{ 0xFF, 0xFF, 0 }, /* 25 left HP mixer */
{ 0xFF, 0xFF, 0 }, /* 26 right HP mixer */
{ 0xFF, 0xFF, 0 }, /* 27 HP control */
{ 0xFF, 0xFF, 0 }, /* 28 left REC mixer */
{ 0xFF, 0xFF, 0 }, /* 29 right REC mixer */
{ 0xFF, 0xFF, 0 }, /* 2A REC control */
{ 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */
{ 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */
{ 0xFF, 0xFF, 0 }, /* 2D SPK control */
{ 0xFF, 0xFF, 0 }, /* 2E sidetone */
{ 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */
{ 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */
{ 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */
{ 0xFF, 0xFF, 0 }, /* 32 DAI2 playback level */
{ 0xFF, 0xFF, 0 }, /* 33 left ADC level */
{ 0xFF, 0xFF, 0 }, /* 34 right ADC level */
{ 0xFF, 0xFF, 0 }, /* 35 MIC1 level */
{ 0xFF, 0xFF, 0 }, /* 36 MIC2 level */
{ 0xFF, 0xFF, 0 }, /* 37 INA level */
{ 0xFF, 0xFF, 0 }, /* 38 INB level */
{ 0xFF, 0xFF, 0 }, /* 39 left HP volume */
{ 0xFF, 0xFF, 0 }, /* 3A right HP volume */
{ 0xFF, 0xFF, 0 }, /* 3B left REC volume */
{ 0xFF, 0xFF, 0 }, /* 3C right REC volume */
{ 0xFF, 0xFF, 0 }, /* 3D left SPK volume */
{ 0xFF, 0xFF, 0 }, /* 3E right SPK volume */
{ 0xFF, 0xFF, 0 }, /* 3F MIC config */
{ 0xFF, 0xFF, 0 }, /* 40 MIC threshold */
{ 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */
{ 0xFF, 0xFF, 0 }, /* 42 excursion limiter threshold */
{ 0xFF, 0xFF, 0 }, /* 43 ALC */
{ 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */
{ 0xFF, 0xFF, 0 }, /* 45 power limiter config */
{ 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */
{ 0xFF, 0xFF, 0 }, /* 47 audio input */
{ 0xFF, 0xFF, 0 }, /* 48 microphone */
{ 0xFF, 0xFF, 0 }, /* 49 level control */
{ 0xFF, 0xFF, 0 }, /* 4A bypass switches */
{ 0xFF, 0xFF, 0 }, /* 4B jack detect */
{ 0xFF, 0xFF, 0 }, /* 4C input enable */
{ 0xFF, 0xFF, 0 }, /* 4D output enable */
{ 0xFF, 0xFF, 0 }, /* 4E bias control */
{ 0xFF, 0xFF, 0 }, /* 4F DAC power */
{ 0xFF, 0xFF, 0 }, /* 50 DAC power */
{ 0xFF, 0xFF, 0 }, /* 51 system */
{ 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 76 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */
{ 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */
{ 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */
{ 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */
{ 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */
{ 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */
{ 0xFF, 0xFF, 0 }, /* AC DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */
{ 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */
{ 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */
{ 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */
{ 0x00, 0x00, 0 }, /* CA */
{ 0x00, 0x00, 0 }, /* CB */
{ 0x00, 0x00, 0 }, /* CC */
{ 0x00, 0x00, 0 }, /* CD */
{ 0x00, 0x00, 0 }, /* CE */
{ 0x00, 0x00, 0 }, /* CF */
{ 0x00, 0x00, 0 }, /* D0 */
{ 0x00, 0x00, 0 }, /* D1 */
{ 0x00, 0x00, 0 }, /* D2 */
{ 0x00, 0x00, 0 }, /* D3 */
{ 0x00, 0x00, 0 }, /* D4 */
{ 0x00, 0x00, 0 }, /* D5 */
{ 0x00, 0x00, 0 }, /* D6 */
{ 0x00, 0x00, 0 }, /* D7 */
{ 0x00, 0x00, 0 }, /* D8 */
{ 0x00, 0x00, 0 }, /* D9 */
{ 0x00, 0x00, 0 }, /* DA */
{ 0x00, 0x00, 0 }, /* DB */
{ 0x00, 0x00, 0 }, /* DC */
{ 0x00, 0x00, 0 }, /* DD */
{ 0x00, 0x00, 0 }, /* DE */
{ 0x00, 0x00, 0 }, /* DF */
{ 0x00, 0x00, 0 }, /* E0 */
{ 0x00, 0x00, 0 }, /* E1 */
{ 0x00, 0x00, 0 }, /* E2 */
{ 0x00, 0x00, 0 }, /* E3 */
{ 0x00, 0x00, 0 }, /* E4 */
{ 0x00, 0x00, 0 }, /* E5 */
{ 0x00, 0x00, 0 }, /* E6 */
{ 0x00, 0x00, 0 }, /* E7 */
{ 0x00, 0x00, 0 }, /* E8 */
{ 0x00, 0x00, 0 }, /* E9 */
{ 0x00, 0x00, 0 }, /* EA */
{ 0x00, 0x00, 0 }, /* EB */
{ 0x00, 0x00, 0 }, /* EC */
{ 0x00, 0x00, 0 }, /* ED */
{ 0x00, 0x00, 0 }, /* EE */
{ 0x00, 0x00, 0 }, /* EF */
{ 0x00, 0x00, 0 }, /* F0 */
{ 0x00, 0x00, 0 }, /* F1 */
{ 0x00, 0x00, 0 }, /* F2 */
{ 0x00, 0x00, 0 }, /* F3 */
{ 0x00, 0x00, 0 }, /* F4 */
{ 0x00, 0x00, 0 }, /* F5 */
{ 0x00, 0x00, 0 }, /* F6 */
{ 0x00, 0x00, 0 }, /* F7 */
{ 0x00, 0x00, 0 }, /* F8 */
{ 0x00, 0x00, 0 }, /* F9 */
{ 0x00, 0x00, 0 }, /* FA */
{ 0x00, 0x00, 0 }, /* FB */
{ 0x00, 0x00, 0 }, /* FC */
{ 0x00, 0x00, 0 }, /* FD */
{ 0x00, 0x00, 0 }, /* FE */
{ 0xFF, 0x00, 1 }, /* FF */
};
static bool max98088_readable_register(struct device *dev, unsigned int reg)
{
return max98088_access[reg].readable;
switch (reg) {
case M98088_REG_00_IRQ_STATUS ... 0xC9:
case M98088_REG_FF_REV_ID:
return true;
default:
return false;
}
}
static bool max98088_writeable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
return true;
default:
return false;
}
}
static bool max98088_volatile_register(struct device *dev, unsigned int reg)
{
return max98088_access[reg].vol;
switch (reg) {
case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
case M98088_REG_FF_REV_ID:
return true;
default:
return false;
}
}
static const struct regmap_config max98088_regmap = {
@ -551,6 +295,7 @@ static const struct regmap_config max98088_regmap = {
.val_bits = 8,
.readable_reg = max98088_readable_register,
.writeable_reg = max98088_writeable_register,
.volatile_reg = max98088_volatile_register,
.max_register = 0xff,
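A hedged illustration of what the callbacks above provide: the regmap core consults them on every access, so registers 0x00-0x03 and the revision ID always hit the hardware instead of the cache, while registers outside the readable range are rejected rather than silently cached. The helper below is illustrative only and not part of this commit:

static int example_read_jack_status(struct regmap *regmap, unsigned int *val)
{
	/*
	 * 0x02 is both readable and volatile under the callbacks above, so
	 * this read always goes to the chip; a read of, say, 0xCA would be
	 * refused by the regmap core instead.
	 */
	return regmap_read(regmap, M98088_REG_02_JACK_STATUS, val);
}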

View file

@ -16,7 +16,7 @@
*/
#define M98088_REG_00_IRQ_STATUS 0x00
#define M98088_REG_01_MIC_STATUS 0x01
#define M98088_REG_02_JACK_STAUS 0x02
#define M98088_REG_02_JACK_STATUS 0x02
#define M98088_REG_03_BATTERY_VOLTAGE 0x03
#define M98088_REG_0F_IRQ_ENABLE 0x0F
#define M98088_REG_10_SYS_CLK 0x10

View file

@ -26,14 +26,9 @@ config SND_SST_IPC_ACPI
depends on ACPI
config SND_SOC_INTEL_SST
tristate "ASoC support for Intel(R) Smart Sound Technology"
tristate
select SND_SOC_INTEL_SST_ACPI if ACPI
depends on (X86 || COMPILE_TEST)
depends on DW_DMAC_CORE
help
This adds support for Intel(R) Smart Sound Technology (SST).
Say Y if you have such a device
If unsure select "N".
config SND_SOC_INTEL_SST_ACPI
tristate
@ -46,8 +41,9 @@ config SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
I2C_DESIGNWARE_PLATFORM
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
@ -58,7 +54,9 @@ config SND_SOC_INTEL_HASWELL_MACH
config SND_SOC_INTEL_BYT_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
depends on X86_INTEL_LPSS && I2C
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
@ -67,7 +65,9 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
config SND_SOC_INTEL_BYT_MAX98090_MACH
tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
depends on X86_INTEL_LPSS && I2C
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
@ -76,8 +76,10 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
I2C_DESIGNWARE_PLATFORM
depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
@ -132,3 +134,8 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
This adds support for the ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
If unsure select "N".
config SND_SOC_INTEL_SKYLAKE
tristate
select SND_HDA_EXT_CORE
select SND_SOC_INTEL_SST

View file

@ -5,6 +5,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
# Machine support
obj-$(CONFIG_SND_SOC) += boards/

View file

@ -132,7 +132,7 @@ static int sst_send_slot_map(struct sst_data *drv)
sizeof(cmd.header) + cmd.header.length);
}
int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
static int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;

View file

@ -33,7 +33,6 @@
struct sst_device *sst;
static DEFINE_MUTEX(sst_lock);
extern struct snd_compr_ops sst_platform_compr_ops;
int sst_register_dsp(struct sst_device *dev)
{

View file

@ -25,6 +25,7 @@
#include "sst-atom-controls.h"
extern struct sst_device *sst;
extern struct snd_compr_ops sst_platform_compr_ops;
#define SST_MONO 1
#define SST_STEREO 2

View file

@ -151,6 +151,7 @@ static int sst_power_control(struct device *dev, bool state)
usage_count = GET_USAGE_COUNT(dev);
dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
if (ret < 0) {
pm_runtime_put_sync(dev);
dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
return ret;
}
@ -204,8 +205,10 @@ static int sst_cdev_open(struct device *dev,
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
retval = pm_runtime_get_sync(ctx->dev);
if (retval < 0)
if (retval < 0) {
pm_runtime_put_sync(ctx->dev);
return retval;
}
str_id = sst_get_stream(ctx, str_params);
if (str_id > 0) {
@ -672,8 +675,10 @@ static int sst_send_byte_stream(struct device *dev,
if (NULL == bytes)
return -EINVAL;
ret_val = pm_runtime_get_sync(ctx->dev);
if (ret_val < 0)
if (ret_val < 0) {
pm_runtime_put_sync(ctx->dev);
return ret_val;
}
ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
sst_pm_runtime_put(ctx);
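All three hunks in this file apply the same fix; the underlying reason, shown as a hedged sketch rather than the driver's exact code, is that pm_runtime_get_sync() increments the device usage count even when it returns an error, so the error path must drop that reference again:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count was bumped even though resume failed */
		pm_runtime_put_sync(dev);
		return ret;
	}
	/* ... use the device, then drop the reference when done ... */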

View file

@ -352,10 +352,9 @@ void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
* copy from mailbox
**/
if (msg_high.part.large) {
data = kzalloc(msg_low, GFP_KERNEL);
data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
if (!data)
return;
memcpy(data, (void *) msg->mailbox_data, msg_low);
/* Copy the command id so that we can use it to put the SST into reset */
dsp_hdr = (struct ipc_dsp_hdr *)data;
cmd_id = dsp_hdr->cmd_id;

View file

@ -22,6 +22,8 @@
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include "../skylake/skl-sst-dsp.h"
struct sst_mem_block;
struct sst_module;
struct sst_fw;
@ -258,6 +260,8 @@ struct sst_mem_block {
*/
struct sst_dsp {
/* Shared for all platforms */
/* runtime */
struct sst_dsp_device *sst_dev;
spinlock_t spinlock; /* IPC locking */
@ -268,10 +272,6 @@ struct sst_dsp {
int irq;
u32 id;
/* list of free and used ADSP memory blocks */
struct list_head used_block_list;
struct list_head free_block_list;
/* operations */
struct sst_ops *ops;
@ -284,6 +284,12 @@ struct sst_dsp {
/* mailbox */
struct sst_mailbox mailbox;
/* HSW/Byt data */
/* list of free and used ADSP memory blocks */
struct list_head used_block_list;
struct list_head free_block_list;
/* SST FW files loaded and their modules */
struct list_head module_list;
struct list_head fw_list;
@ -299,6 +305,15 @@ struct sst_dsp {
/* DMA FW loading */
struct sst_dma *dma;
bool fw_use_dma;
/* SKL data */
/* To allocate CL dma buffers */
struct skl_dsp_loader_ops dsp_ops;
struct skl_dsp_fw_ops fw_ops;
int sst_state;
struct skl_cl_dev cl_dev;
u32 intr_status;
};
/* Size optimised DRAM/IRAM memcpy */

View file

@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include "sst-dsp.h"
#include "sst-dsp-priv.h"
@ -196,6 +197,22 @@ int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
}
EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
/* This is for register bits with the RWC attribute */
void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
u32 mask, u32 value)
{
unsigned int old, new;
u32 ret;
ret = sst_dsp_shim_read_unlocked(sst, offset);
old = ret;
new = (old & (~mask)) | (value & mask);
sst_dsp_shim_write_unlocked(sst, offset, new);
}
EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced_unlocked);
int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
u32 mask, u32 value)
{
@ -222,6 +239,60 @@ int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
}
EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
/* This is for register bits with the RWC attribute */
void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
u32 mask, u32 value)
{
unsigned long flags;
spin_lock_irqsave(&sst->spinlock, flags);
sst_dsp_shim_update_bits_forced_unlocked(sst, offset, mask, value);
spin_unlock_irqrestore(&sst->spinlock, flags);
}
EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced);
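A hedged usage sketch for the _forced variants added above: write-one-to-clear (RWC) bits must be written even when the register already reads back with the bit set, which a change-detecting update_bits helper would skip. The register and bit names below are placeholders, not identifiers from this commit:

	/* acknowledge a pending interrupt by writing the RWC bit back as 1 */
	sst_dsp_shim_update_bits_forced(sst, EXAMPLE_REG_ADSPIS,
					EXAMPLE_ADSPIS_IPC, EXAMPLE_ADSPIS_IPC);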
int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
u32 target, u32 timeout, char *operation)
{
int time, ret;
u32 reg;
bool done = false;
/*
* we will poll for a couple of ms using mdelay; if not successful,
* then go to a longer sleep using usleep_range
*/
/* check if set state successful */
for (time = 0; time < 5; time++) {
if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target) {
done = true;
break;
}
mdelay(1);
}
if (done == false) {
/* sleeping in 10ms steps so adjust timeout value */
timeout /= 10;
for (time = 0; time < timeout; time++) {
if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target)
break;
usleep_range(5000, 10000);
}
}
reg = sst_dsp_shim_read_unlocked(ctx, offset);
dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
(time < timeout) ? "successful" : "timedout");
ret = time < timeout ? 0 : -ETIME;
return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_register_poll);
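A hedged example of calling the new poll helper; the offset, mask and target values are placeholders, and the timeout appears to be in milliseconds given the 10 ms stepping above:

	/* wait up to 300 ms for the DSP to report the expected state */
	ret = sst_dsp_register_poll(sst, EXAMPLE_REG_STATUS,
				    EXAMPLE_STATE_MASK, EXAMPLE_STATE_RUN,
				    300, "core power up");
	if (ret < 0)
		dev_err(sst->dev, "DSP did not reach the expected state\n");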
void sst_dsp_dump(struct sst_dsp *sst)
{
if (sst->ops->dump)

View file

@ -230,6 +230,8 @@ void sst_dsp_shim_write64(struct sst_dsp *sst, u32 offset, u64 value);
u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
u64 mask, u64 value);
void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
u32 mask, u32 value);
/* SHIM Read / Write Unlocked for callers already holding sst lock */
void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
@ -240,6 +242,8 @@ void sst_dsp_shim_write64_unlocked(struct sst_dsp *sst, u32 offset, u64 value);
u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
u64 mask, u64 value);
void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
u32 mask, u32 value);
/* Internal generic low-level SST IO functions - can be overridden */
void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
@ -278,6 +282,8 @@ void sst_dsp_inbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
int sst_dsp_register_poll(struct sst_dsp *dsp, u32 offset, u32 mask,
u32 expected_value, u32 timeout, char *operation);
/* Debug */
void sst_dsp_dump(struct sst_dsp *sst);

View file

@ -0,0 +1,9 @@
snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
# Skylake IPC Support
snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
skl-sst.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o

View file

@ -0,0 +1,884 @@
/*
* skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
* configurations
*
* Copyright (C) 2015 Intel Corp
* Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"
static int skl_alloc_dma_buf(struct device *dev,
struct snd_dma_buffer *dmab, size_t size)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
struct hdac_bus *bus = ebus_to_hbus(ebus);
if (!bus)
return -ENODEV;
return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}
static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
struct hdac_bus *bus = ebus_to_hbus(ebus);
if (!bus)
return -ENODEV;
bus->io_ops->dma_free_pages(bus, dmab);
return 0;
}
int skl_init_dsp(struct skl *skl)
{
void __iomem *mmio_base;
struct hdac_ext_bus *ebus = &skl->ebus;
struct hdac_bus *bus = ebus_to_hbus(ebus);
int irq = bus->irq;
struct skl_dsp_loader_ops loader_ops;
int ret;
loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
loader_ops.free_dma_buf = skl_free_dma_buf;
/* enable ppcap interrupt */
snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
/* read the BAR of the ADSP MMIO */
mmio_base = pci_ioremap_bar(skl->pci, 4);
if (mmio_base == NULL) {
dev_err(bus->dev, "ioremap error\n");
return -ENXIO;
}
ret = skl_sst_dsp_init(bus->dev, mmio_base, irq,
loader_ops, &skl->skl_sst);
dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
return ret;
}
void skl_free_dsp(struct skl *skl)
{
struct hdac_ext_bus *ebus = &skl->ebus;
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl_sst *ctx = skl->skl_sst;
/* disable ppcap interrupt */
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
skl_sst_dsp_cleanup(bus->dev, ctx);
if (ctx->dsp->addr.lpe)
iounmap(ctx->dsp->addr.lpe);
}
int skl_suspend_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
int ret;
/* if ppcap is not supported return 0 */
if (!skl->ebus.ppcap)
return 0;
ret = skl_dsp_sleep(ctx->dsp);
if (ret < 0)
return ret;
/* disable ppcap interrupt */
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
return 0;
}
int skl_resume_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
/* if ppcap is not supported return 0 */
if (!skl->ebus.ppcap)
return 0;
/* enable ppcap interrupt */
snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
return skl_dsp_wake(ctx->dsp);
}
enum skl_bitdepth skl_get_bit_depth(int params)
{
switch (params) {
case 8:
return SKL_DEPTH_8BIT;
case 16:
return SKL_DEPTH_16BIT;
case 24:
return SKL_DEPTH_24BIT;
case 32:
return SKL_DEPTH_32BIT;
default:
return SKL_DEPTH_INVALID;
}
}
static u32 skl_create_channel_map(enum skl_ch_cfg ch_cfg)
{
u32 config;
switch (ch_cfg) {
case SKL_CH_CFG_MONO:
config = (0xFFFFFFF0 | SKL_CHANNEL_LEFT);
break;
case SKL_CH_CFG_STEREO:
config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_RIGHT << 4));
break;
case SKL_CH_CFG_2_1:
config = (0xFFFFF000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_RIGHT << 4)
| (SKL_CHANNEL_LFE << 8));
break;
case SKL_CH_CFG_3_0:
config = (0xFFFFF000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_CENTER << 4)
| (SKL_CHANNEL_RIGHT << 8));
break;
case SKL_CH_CFG_3_1:
config = (0xFFFF0000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_CENTER << 4)
| (SKL_CHANNEL_RIGHT << 8)
| (SKL_CHANNEL_LFE << 12));
break;
case SKL_CH_CFG_QUATRO:
config = (0xFFFF0000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_RIGHT << 4)
| (SKL_CHANNEL_LEFT_SURROUND << 8)
| (SKL_CHANNEL_RIGHT_SURROUND << 12));
break;
case SKL_CH_CFG_4_0:
config = (0xFFFF0000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_CENTER << 4)
| (SKL_CHANNEL_RIGHT << 8)
| (SKL_CHANNEL_CENTER_SURROUND << 12));
break;
case SKL_CH_CFG_5_0:
config = (0xFFF00000 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_CENTER << 4)
| (SKL_CHANNEL_RIGHT << 8)
| (SKL_CHANNEL_LEFT_SURROUND << 12)
| (SKL_CHANNEL_RIGHT_SURROUND << 16));
break;
case SKL_CH_CFG_5_1:
config = (0xFF000000 | SKL_CHANNEL_CENTER
| (SKL_CHANNEL_LEFT << 4)
| (SKL_CHANNEL_RIGHT << 8)
| (SKL_CHANNEL_LEFT_SURROUND << 12)
| (SKL_CHANNEL_RIGHT_SURROUND << 16)
| (SKL_CHANNEL_LFE << 20));
break;
case SKL_CH_CFG_DUAL_MONO:
config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_LEFT << 4));
break;
case SKL_CH_CFG_I2S_DUAL_STEREO_0:
config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
| (SKL_CHANNEL_RIGHT << 4));
break;
case SKL_CH_CFG_I2S_DUAL_STEREO_1:
config = (0xFFFF00FF | (SKL_CHANNEL_LEFT << 8)
| (SKL_CHANNEL_RIGHT << 12));
break;
default:
config = 0xFFFFFFFF;
break;
}
return config;
}
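The map built above packs one channel id per 4-bit nibble, low nibble first, with 0xF marking unused slots; a small helper (illustrative only, not part of the commit) makes the layout explicit:

static u8 example_channel_in_slot(u32 channel_map, int slot)
{
	/* slot 0 is the lowest nibble; 0xF means the slot is unused */
	return (channel_map >> (slot * 4)) & 0xF;
}

For SKL_CH_CFG_STEREO, for example, slot 0 decodes to SKL_CHANNEL_LEFT, slot 1 to SKL_CHANNEL_RIGHT, and every remaining slot reads back as 0xF.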
/*
* Each module in the DSP expects a base module configuration, which consists of
* PCM format information (which we calculate in the driver) and resource values
* which are read from the widget information passed through the topology binary.
* This is sent when we create a module with the INIT_INSTANCE IPC msg
*/
static void skl_set_base_module_format(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_base_cfg *base_cfg)
{
struct skl_module_fmt *format = &mconfig->in_fmt;
base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
base_cfg->audio_fmt.s_freq = format->s_freq;
base_cfg->audio_fmt.bit_depth = format->bit_depth;
base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
format->bit_depth, format->valid_bit_depth,
format->ch_cfg);
base_cfg->audio_fmt.channel_map = skl_create_channel_map(
base_cfg->audio_fmt.ch_cfg);
base_cfg->audio_fmt.interleaving = SKL_INTERLEAVING_PER_CHANNEL;
base_cfg->cps = mconfig->mcps;
base_cfg->ibs = mconfig->ibs;
base_cfg->obs = mconfig->obs;
}
/*
* Copies copier capabilities into copier module and updates copier module
* config size.
*/
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
struct skl_cpr_cfg *cpr_mconfig)
{
if (mconfig->formats_config.caps_size == 0)
return;
memcpy(cpr_mconfig->gtw_cfg.config_data,
mconfig->formats_config.caps,
mconfig->formats_config.caps_size);
cpr_mconfig->gtw_cfg.config_length =
(mconfig->formats_config.caps_size) / 4;
}
/*
* Calculate the gateway settings required for copier module, type of
* gateway and index of gateway to use
*/
static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_cpr_cfg *cpr_mconfig)
{
union skl_connector_node_id node_id = {0};
struct skl_pipe_params *params = mconfig->pipe->p_params;
switch (mconfig->dev_type) {
case SKL_DEVICE_BT:
node_id.node.dma_type =
(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
SKL_DMA_I2S_LINK_OUTPUT_CLASS :
SKL_DMA_I2S_LINK_INPUT_CLASS;
node_id.node.vindex = params->host_dma_id +
(mconfig->vbus_id << 3);
break;
case SKL_DEVICE_I2S:
node_id.node.dma_type =
(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
SKL_DMA_I2S_LINK_OUTPUT_CLASS :
SKL_DMA_I2S_LINK_INPUT_CLASS;
node_id.node.vindex = params->host_dma_id +
(mconfig->time_slot << 1) +
(mconfig->vbus_id << 3);
break;
case SKL_DEVICE_DMIC:
node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
node_id.node.vindex = mconfig->vbus_id +
(mconfig->time_slot);
break;
case SKL_DEVICE_HDALINK:
node_id.node.dma_type =
(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
SKL_DMA_HDA_LINK_OUTPUT_CLASS :
SKL_DMA_HDA_LINK_INPUT_CLASS;
node_id.node.vindex = params->link_dma_id;
break;
default:
node_id.node.dma_type =
(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
SKL_DMA_HDA_HOST_OUTPUT_CLASS :
SKL_DMA_HDA_HOST_INPUT_CLASS;
node_id.node.vindex = params->host_dma_id;
break;
}
cpr_mconfig->gtw_cfg.node_id = node_id.val;
if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
else
cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
cpr_mconfig->cpr_feature_mask = 0;
cpr_mconfig->gtw_cfg.config_length = 0;
skl_copy_copier_caps(mconfig, cpr_mconfig);
}
static void skl_setup_out_format(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_audio_data_format *out_fmt)
{
struct skl_module_fmt *format = &mconfig->out_fmt;
out_fmt->number_of_channels = (u8)format->channels;
out_fmt->s_freq = format->s_freq;
out_fmt->bit_depth = format->bit_depth;
out_fmt->valid_bit_depth = format->valid_bit_depth;
out_fmt->ch_cfg = format->ch_cfg;
out_fmt->channel_map = skl_create_channel_map(out_fmt->ch_cfg);
out_fmt->interleaving = SKL_INTERLEAVING_PER_CHANNEL;
dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}
/*
* The DSP needs the SRC module for frequency conversion; SRC takes the base
* module configuration and the target frequency as an extra parameter, passed
* as the src config
*/
static void skl_set_src_format(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_src_module_cfg *src_mconfig)
{
struct skl_module_fmt *fmt = &mconfig->out_fmt;
skl_set_base_module_format(ctx, mconfig,
(struct skl_base_cfg *)src_mconfig);
src_mconfig->src_cfg = fmt->s_freq;
}
/*
* The DSP needs the updown mixer module to do channel conversion. The updown
* module takes the base module configuration and a channel configuration.
* It also takes coefficients; for now we apply the defaults here
*/
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_up_down_mixer_cfg *mixer_mconfig)
{
struct skl_module_fmt *fmt = &mconfig->out_fmt;
int i = 0;
skl_set_base_module_format(ctx, mconfig,
(struct skl_base_cfg *)mixer_mconfig);
mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
/* Select F/W default coefficient */
mixer_mconfig->coeff_sel = 0x0;
/* User coeff, don't care since we are selecting F/W defaults */
for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
mixer_mconfig->coeff[i] = 0xDEADBEEF;
}
/*
* 'copier' is a DSP-internal module which copies data from host DMA (HDA host
* DMA) or a link (HDA link, SSP, PDM).
* Here we calculate the copier module parameters, like PCM format, output
* format and gateway settings.
* copier_module_config is sent as the input buffer with the INIT_INSTANCE IPC msg
*/
static void skl_set_copier_format(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_cpr_cfg *cpr_mconfig)
{
struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
skl_set_base_module_format(ctx, mconfig, base_cfg);
skl_setup_out_format(ctx, mconfig, out_fmt);
skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}
static u16 skl_get_module_param_size(struct skl_sst *ctx,
struct skl_module_cfg *mconfig)
{
u16 param_size;
switch (mconfig->m_type) {
case SKL_MODULE_TYPE_COPIER:
param_size = sizeof(struct skl_cpr_cfg);
param_size += mconfig->formats_config.caps_size;
return param_size;
case SKL_MODULE_TYPE_SRCINT:
return sizeof(struct skl_src_module_cfg);
case SKL_MODULE_TYPE_UPDWMIX:
return sizeof(struct skl_up_down_mixer_cfg);
default:
/*
* return only base cfg when no specific module type is
* specified
*/
return sizeof(struct skl_base_cfg);
}
return 0;
}
/*
* DSP firmware supports various modules like copier, SRC, updown etc.
* These modules require various parameters to be calculated and sent to the
* DSP for module initialization. By default a generic module needs only the
* base module format configuration
*/
static int skl_set_module_format(struct skl_sst *ctx,
struct skl_module_cfg *module_config,
u16 *module_config_size,
void **param_data)
{
u16 param_size;
param_size = skl_get_module_param_size(ctx, module_config);
*param_data = kzalloc(param_size, GFP_KERNEL);
if (NULL == *param_data)
return -ENOMEM;
*module_config_size = param_size;
switch (module_config->m_type) {
case SKL_MODULE_TYPE_COPIER:
skl_set_copier_format(ctx, module_config, *param_data);
break;
case SKL_MODULE_TYPE_SRCINT:
skl_set_src_format(ctx, module_config, *param_data);
break;
case SKL_MODULE_TYPE_UPDWMIX:
skl_set_updown_mixer_format(ctx, module_config, *param_data);
break;
default:
skl_set_base_module_format(ctx, module_config, *param_data);
break;
}
dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
module_config->id.module_id, param_size);
print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
*param_data, param_size, false);
return 0;
}
static int skl_get_queue_index(struct skl_module_pin *mpin,
struct skl_module_inst_id id, int max)
{
int i;
for (i = 0; i < max; i++) {
if (mpin[i].id.module_id == id.module_id &&
mpin[i].id.instance_id == id.instance_id)
return i;
}
return -EINVAL;
}
/*
* Allocates queue for each module.
* If dynamic, the pin_index is allocated from 0 to max_pin.
* If static, the pin_index is fixed based on the module_id and instance id
*/
static int skl_alloc_queue(struct skl_module_pin *mpin,
struct skl_module_inst_id id, int max)
{
int i;
/*
* if the pin is dynamic, find the first free pin;
* otherwise find the pin matching the module and instance id, as the
* topology will ensure a unique pin is assigned, so there is no need
* to allocate/free
*/
for (i = 0; i < max; i++) {
if (mpin[i].is_dynamic) {
if (!mpin[i].in_use) {
mpin[i].in_use = true;
mpin[i].id.module_id = id.module_id;
mpin[i].id.instance_id = id.instance_id;
return i;
}
} else {
if (mpin[i].id.module_id == id.module_id &&
mpin[i].id.instance_id == id.instance_id)
return i;
}
}
return -EINVAL;
}
static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
if (mpin[q_index].is_dynamic) {
mpin[q_index].in_use = false;
mpin[q_index].id.module_id = 0;
mpin[q_index].id.instance_id = 0;
}
}
/*
* A module needs to be instantiated in the DSP. A module is present in a
* collection of modules referred to as a PIPE.
* We first calculate the module format, based on module type, and then
* invoke the DSP by sending the INIT_INSTANCE IPC using the ipc helper
*/
int skl_init_module(struct skl_sst *ctx,
struct skl_module_cfg *mconfig, char *param)
{
u16 module_config_size = 0;
void *param_data = NULL;
int ret;
struct skl_ipc_init_instance_msg msg;
dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
mconfig->id.module_id, mconfig->id.instance_id);
if (mconfig->pipe->state != SKL_PIPE_CREATED) {
dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
mconfig->pipe->state, mconfig->pipe->ppl_id);
return -EIO;
}
ret = skl_set_module_format(ctx, mconfig,
&module_config_size, &param_data);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
return ret;
}
msg.module_id = mconfig->id.module_id;
msg.instance_id = mconfig->id.instance_id;
msg.ppl_instance_id = mconfig->pipe->ppl_id;
msg.param_data_size = module_config_size;
msg.core_id = mconfig->core_id;
ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
if (ret < 0) {
dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
kfree(param_data);
return ret;
}
mconfig->m_state = SKL_MODULE_INIT_DONE;
return ret;
}
static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
*src_module, struct skl_module_cfg *dst_module)
{
dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
__func__, src_module->id.module_id, src_module->id.instance_id);
dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
dst_module->id.module_id, dst_module->id.instance_id);
dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
src_module->m_state, dst_module->m_state);
}
/*
* On module freeup, we need to unbind the module from the modules
* it is already bound to.
* Find the allocated pins and unbind them using the bind_unbind IPC
*/
int skl_unbind_modules(struct skl_sst *ctx,
struct skl_module_cfg *src_mcfg,
struct skl_module_cfg *dst_mcfg)
{
int ret;
struct skl_ipc_bind_unbind_msg msg;
struct skl_module_inst_id src_id = src_mcfg->id;
struct skl_module_inst_id dst_id = dst_mcfg->id;
int in_max = dst_mcfg->max_in_queue;
int out_max = src_mcfg->max_out_queue;
int src_index, dst_index;
skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
if (src_mcfg->m_state != SKL_MODULE_BIND_DONE)
return 0;
/*
* if intra module unbind, check if both modules are BIND,
* then send unbind
*/
if ((src_mcfg->pipe->ppl_id != dst_mcfg->pipe->ppl_id) &&
dst_mcfg->m_state != SKL_MODULE_BIND_DONE)
return 0;
else if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
return 0;
/* get src queue index */
src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
if (src_index < 0)
return -EINVAL;
msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
/* get dst queue index */
dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
if (dst_index < 0)
return -EINVAL;
msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
msg.module_id = src_mcfg->id.module_id;
msg.instance_id = src_mcfg->id.instance_id;
msg.dst_module_id = dst_mcfg->id.module_id;
msg.dst_instance_id = dst_mcfg->id.instance_id;
msg.bind = false;
ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
if (!ret) {
src_mcfg->m_state = SKL_MODULE_UNINIT;
/* free queue only if unbind is success */
skl_free_queue(src_mcfg->m_out_pin, src_index);
skl_free_queue(dst_mcfg->m_in_pin, dst_index);
}
return ret;
}
/*
* Once a module is instantiated it needs to be 'bound' to other modules in
* the pipeline. For binding we need to find the module pins which are bound
* together.
* This function finds the pins and then sends the bind_unbind IPC message to
* the DSP using the IPC helper
*/
int skl_bind_modules(struct skl_sst *ctx,
struct skl_module_cfg *src_mcfg,
struct skl_module_cfg *dst_mcfg)
{
int ret;
struct skl_ipc_bind_unbind_msg msg;
struct skl_module_inst_id src_id = src_mcfg->id;
struct skl_module_inst_id dst_id = dst_mcfg->id;
int in_max = dst_mcfg->max_in_queue;
int out_max = src_mcfg->max_out_queue;
int src_index, dst_index;
skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
return 0;
src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_id, out_max);
if (src_index < 0)
return -EINVAL;
msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_id, in_max);
if (dst_index < 0) {
skl_free_queue(src_mcfg->m_out_pin, src_index);
return -EINVAL;
}
msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
msg.src_queue, msg.dst_queue);
msg.module_id = src_mcfg->id.module_id;
msg.instance_id = src_mcfg->id.instance_id;
msg.dst_module_id = dst_mcfg->id.module_id;
msg.dst_instance_id = dst_mcfg->id.instance_id;
msg.bind = true;
ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
if (!ret) {
src_mcfg->m_state = SKL_MODULE_BIND_DONE;
} else {
/* error case: if IPC fails, clear the queue index */
skl_free_queue(src_mcfg->m_out_pin, src_index);
skl_free_queue(dst_mcfg->m_in_pin, dst_index);
}
return ret;
}
static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
enum skl_ipc_pipeline_state state)
{
dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}
/*
* A pipeline is a collection of modules. Before a module is instantiated, a
* pipeline needs to be created for it.
* This function creates the pipeline by sending the create pipeline IPC message
* to the FW
*/
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
int ret;
dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
pipe->pipe_priority, pipe->ppl_id);
if (ret < 0) {
dev_err(ctx->dev, "Failed to create pipeline\n");
return ret;
}
pipe->state = SKL_PIPE_CREATED;
return 0;
}
/*
* A pipeline needs to be deleted on cleanup. If a pipeline is running, then
* pause the pipeline first and then delete it.
* The pipe delete is done by sending the delete pipeline IPC. The DSP will stop
* the DMA engines and release the resources
*/
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
int ret;
dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
/* If pipe is not started, do not try to stop the pipe in FW. */
if (pipe->state > SKL_PIPE_STARTED) {
ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
if (ret < 0) {
dev_err(ctx->dev, "Failed to stop pipeline\n");
return ret;
}
pipe->state = SKL_PIPE_PAUSED;
} else {
/* If pipe was not created in FW, do not try to delete it */
if (pipe->state < SKL_PIPE_CREATED)
return 0;
ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
if (ret < 0)
dev_err(ctx->dev, "Failed to delete pipeline\n");
}
return ret;
}
/*
* A pipeline is also a scheduling entity in the DSP, which can be run or stopped.
* To process data the pipe needs to be run by sending the set pipe state IPC
* to the DSP
*/
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
int ret;
dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
/* If pipe was not created in FW, do not try to pause or delete */
if (pipe->state < SKL_PIPE_CREATED)
return 0;
/* Pipe has to be paused before it is started */
ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
if (ret < 0) {
dev_err(ctx->dev, "Failed to pause pipe\n");
return ret;
}
pipe->state = SKL_PIPE_PAUSED;
ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
if (ret < 0) {
dev_err(ctx->dev, "Failed to start pipe\n");
return ret;
}
pipe->state = SKL_PIPE_STARTED;
return 0;
}
/*
* Stop the pipeline by sending the set pipe state IPC.
* The DSP doesn't implement stop, so we always send the pause message
*/
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
int ret;
dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
/* If pipe was not created in FW, do not try to pause or delete */
if (pipe->state < SKL_PIPE_PAUSED)
return 0;
ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
if (ret < 0) {
dev_dbg(ctx->dev, "Failed to stop pipe\n");
return ret;
}
pipe->state = SKL_PIPE_CREATED;
return 0;
}

View file

@ -0,0 +1,140 @@
/*
* skl-nhlt.c - Intel SKL Platform NHLT parsing
*
* Copyright (C) 2015 Intel Corp
* Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#include "skl.h"
/* Unique identification for getting NHLT blobs */
static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
void __iomem *skl_nhlt_init(struct device *dev)
{
acpi_handle handle;
union acpi_object *obj;
struct nhlt_resource_desc *nhlt_ptr = NULL;
if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
dev_err(dev, "Requested NHLT device not found\n");
return NULL;
}
obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
if (obj && obj->type == ACPI_TYPE_BUFFER) {
nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
return ioremap_cache(nhlt_ptr->min_addr, nhlt_ptr->length);
}
dev_err(dev, "device specific method to extract NHLT blob failed\n");
return NULL;
}
void skl_nhlt_free(void __iomem *addr)
{
iounmap(addr);
addr = NULL;
}
static struct nhlt_specific_cfg *skl_get_specific_cfg(
struct device *dev, struct nhlt_fmt *fmt,
u8 no_ch, u32 rate, u16 bps)
{
struct nhlt_specific_cfg *sp_config;
struct wav_fmt *wfmt;
struct nhlt_fmt_cfg *fmt_config = fmt->fmt_config;
int i;
dev_dbg(dev, "Format count =%d\n", fmt->fmt_count);
for (i = 0; i < fmt->fmt_count; i++) {
wfmt = &fmt_config->fmt_ext.fmt;
dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", wfmt->channels,
wfmt->bits_per_sample, wfmt->samples_per_sec);
if (wfmt->channels == no_ch && wfmt->samples_per_sec == rate &&
wfmt->bits_per_sample == bps) {
sp_config = &fmt_config->config;
return sp_config;
}
fmt_config = (struct nhlt_fmt_cfg *)(fmt_config->config.caps +
fmt_config->config.size);
}
return NULL;
}
static void dump_config(struct device *dev, u32 instance_id, u8 linktype,
u8 s_fmt, u8 num_channels, u32 s_rate, u8 dirn, u16 bps)
{
dev_dbg(dev, "Input configuration\n");
dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", num_channels, s_fmt, s_rate);
dev_dbg(dev, "vbus_id=%d link_type=%d\n", instance_id, linktype);
dev_dbg(dev, "bits_per_sample=%d\n", bps);
}
static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
u32 instance_id, u8 link_type, u8 dirn)
{
dev_dbg(dev, "vbus_id=%d link_type=%d dir=%d\n",
epnt->virtual_bus_id, epnt->linktype, epnt->direction);
if ((epnt->virtual_bus_id == instance_id) &&
(epnt->linktype == link_type) &&
(epnt->direction == dirn))
return true;
else
return false;
}
struct nhlt_specific_cfg
*skl_get_ep_blob(struct skl *skl, u32 instance, u8 link_type,
u8 s_fmt, u8 num_ch, u32 s_rate, u8 dirn)
{
struct nhlt_fmt *fmt;
struct nhlt_endpoint *epnt;
struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
struct device *dev = bus->dev;
struct nhlt_specific_cfg *sp_config;
struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
u16 bps = num_ch * s_fmt;
u8 j;
dump_config(dev, instance, link_type, s_fmt, num_ch, s_rate, dirn, bps);
epnt = (struct nhlt_endpoint *)nhlt->desc;
dev_dbg(dev, "endpoint count =%d\n", nhlt->endpoint_count);
for (j = 0; j < nhlt->endpoint_count; j++) {
if (skl_check_ep_match(dev, epnt, instance, link_type, dirn)) {
fmt = (struct nhlt_fmt *)(epnt->config.caps +
epnt->config.size);
sp_config = skl_get_specific_cfg(dev, fmt, num_ch, s_rate, bps);
if (sp_config)
return sp_config;
}
epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
}
return NULL;
}
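A hedged sketch of how a caller might use the lookup above to fetch the blob for a DMIC capture endpoint; the bus id, format values and the dirn variable are illustrative, since the real call sites are outside this hunk:

	struct nhlt_specific_cfg *cfg;

	/* 2-channel, 48 kHz, 16 bits per sample on the DMIC link, virtual bus 0 */
	cfg = skl_get_ep_blob(skl, 0, NHLT_LINK_DMIC, 16, 2, 48000, dirn);
	if (!cfg)
		dev_err(dev, "no matching NHLT endpoint configuration\n");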

View file

@ -0,0 +1,106 @@
/*
* skl-nhlt.h - Intel HDA Platform NHLT header
*
* Copyright (C) 2015 Intel Corp
* Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#ifndef __SKL_NHLT_H__
#define __SKL_NHLT_H__
#include <linux/acpi.h>
struct wav_fmt {
u16 fmt_tag;
u16 channels;
u32 samples_per_sec;
u32 avg_bytes_per_sec;
u16 block_align;
u16 bits_per_sample;
u16 cb_size;
} __packed;
struct wav_fmt_ext {
struct wav_fmt fmt;
union samples {
u16 valid_bits_per_sample;
u16 samples_per_block;
u16 reserved;
} sample;
u32 channel_mask;
u8 sub_fmt[16];
} __packed;
enum nhlt_link_type {
NHLT_LINK_HDA = 0,
NHLT_LINK_DSP = 1,
NHLT_LINK_DMIC = 2,
NHLT_LINK_SSP = 3,
NHLT_LINK_INVALID
};
enum nhlt_device_type {
NHLT_DEVICE_BT = 0,
NHLT_DEVICE_DMIC = 1,
NHLT_DEVICE_I2S = 4,
NHLT_DEVICE_INVALID
};
struct nhlt_specific_cfg {
u32 size;
u8 caps[0];
} __packed;
struct nhlt_fmt_cfg {
struct wav_fmt_ext fmt_ext;
struct nhlt_specific_cfg config;
} __packed;
struct nhlt_fmt {
u8 fmt_count;
struct nhlt_fmt_cfg fmt_config[0];
} __packed;
struct nhlt_endpoint {
u32 length;
u8 linktype;
u8 instance_id;
u16 vendor_id;
u16 device_id;
u16 revision_id;
u32 subsystem_id;
u8 device_type;
u8 direction;
u8 virtual_bus_id;
struct nhlt_specific_cfg config;
} __packed;
struct nhlt_acpi_table {
struct acpi_table_header header;
u8 endpoint_count;
struct nhlt_endpoint desc[0];
} __packed;
struct nhlt_resource_desc {
u32 extra;
u16 flags;
u64 addr_spc_gra;
u64 min_addr;
u64 max_addr;
u64 addr_trans_offset;
u64 length;
} __packed;
#endif

View file

@ -0,0 +1,916 @@
/*
* skl-pcm.c - ASoC HDA Platform driver file implementing PCM functionality
*
* Copyright (C) 2014-2015 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "skl.h"
#define HDA_MONO 1
#define HDA_STEREO 2
static struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
SNDRV_PCM_INFO_HAS_LINK_ATIME |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = AZX_MAX_BUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = AZX_MAX_BUF_SIZE / 2,
.periods_min = 2,
.periods_max = AZX_MAX_FRAG,
.fifo_size = 0,
};
static inline
struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
{
return substream->runtime->private_data;
}
static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct hdac_stream *hstream = hdac_stream(stream);
struct hdac_bus *bus = hstream->bus;
return hbus_to_ebus(bus);
}
static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
struct snd_pcm_substream *substream,
size_t size)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
hdac_stream(stream)->bufsize = 0;
hdac_stream(stream)->period_bytes = 0;
hdac_stream(stream)->format_val = 0;
return snd_pcm_lib_malloc_pages(substream, size);
}
static int skl_substream_free_pages(struct hdac_bus *bus,
struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
struct snd_pcm_runtime *runtime)
{
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
/* avoid wrap-around with wall-clock */
snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
20, 178000000);
}
static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
{
if (ebus->ppcap)
return HDAC_EXT_STREAM_TYPE_HOST;
else
return HDAC_EXT_STREAM_TYPE_COUPLED;
}
static int skl_pcm_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct skl_dma_params *dma_params;
int ret;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
ret = pm_runtime_get_sync(dai->dev);
if (ret)
return ret;
stream = snd_hdac_ext_stream_assign(ebus, substream,
skl_get_host_stream_type(ebus));
if (stream == NULL)
return -EBUSY;
skl_set_pcm_constrains(ebus, runtime);
/*
* disable WALLCLOCK timestamps for capture streams
* until we figure out how to handle digital inputs
*/
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
}
runtime->private_data = stream;
dma_params = kzalloc(sizeof(*dma_params), GFP_KERNEL);
if (!dma_params)
return -ENOMEM;
dma_params->stream_tag = hdac_stream(stream)->stream_tag;
snd_soc_dai_set_dma_data(dai, substream, dma_params);
dev_dbg(dai->dev, "stream tag set in dma params=%d\n",
dma_params->stream_tag);
snd_pcm_set_sync(substream);
return 0;
}
static int skl_get_format(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct skl_dma_params *dma_params;
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
int format_val = 0;
if (ebus->ppcap) {
struct snd_pcm_runtime *runtime = substream->runtime;
format_val = snd_hdac_calc_stream_format(runtime->rate,
runtime->channels,
runtime->format,
32, 0);
} else {
struct snd_soc_dai *codec_dai = rtd->codec_dai;
dma_params = snd_soc_dai_get_dma_data(codec_dai, substream);
if (dma_params)
format_val = dma_params->format;
}
return format_val;
}
static int skl_pcm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
unsigned int format_val;
int err;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
if (hdac_stream(stream)->prepared) {
dev_dbg(dai->dev, "already stream is prepared - returning\n");
return 0;
}
format_val = skl_get_format(substream, dai);
dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
hdac_stream(stream)->stream_tag, format_val);
snd_hdac_stream_reset(hdac_stream(stream));
err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
if (err < 0)
return err;
err = snd_hdac_stream_setup(hdac_stream(stream));
if (err < 0)
return err;
hdac_stream(stream)->prepared = 1;
return err;
}
static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int ret, dma_id;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
ret = skl_substream_alloc_pages(ebus, substream,
params_buffer_bytes(params));
if (ret < 0)
return ret;
dev_dbg(dai->dev, "format_val, rate=%d, ch=%d, format=%d\n",
runtime->rate, runtime->channels, runtime->format);
dma_id = hdac_stream(stream)->stream_tag - 1;
dev_dbg(dai->dev, "dma_id=%d\n", dma_id);
return 0;
}
static void skl_pcm_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct skl_dma_params *dma_params = NULL;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
dma_params = snd_soc_dai_get_dma_data(dai, substream);
/*
* Clear the DAI dma data pointer before freeing dma_params below,
* so nothing is left pointing at freed memory.
*/
snd_soc_dai_set_dma_data(dai, substream, NULL);
pm_runtime_mark_last_busy(dai->dev);
pm_runtime_put_autosuspend(dai->dev);
kfree(dma_params);
}
static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
snd_hdac_stream_cleanup(hdac_stream(stream));
hdac_stream(stream)->prepared = 0;
return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
}
static int skl_link_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *link_dev;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct skl_dma_params *dma_params;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int dma_id;
pr_debug("%s\n", __func__);
link_dev = snd_hdac_ext_stream_assign(ebus, substream,
HDAC_EXT_STREAM_TYPE_LINK);
if (!link_dev)
return -EBUSY;
snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
/* set the stream tag in the codec dai dma params */
dma_params = (struct skl_dma_params *)
snd_soc_dai_get_dma_data(codec_dai, substream);
if (dma_params)
dma_params->stream_tag = hdac_stream(link_dev)->stream_tag;
snd_soc_dai_set_dma_data(codec_dai, substream, (void *)dma_params);
dma_id = hdac_stream(link_dev)->stream_tag - 1;
return 0;
}
static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
unsigned int format_val = 0;
struct skl_dma_params *dma_params;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_pcm_hw_params *params;
struct snd_interval *channels, *rate;
struct hdac_ext_link *link;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
if (link_dev->link_prepared) {
dev_dbg(dai->dev, "already stream is prepared - returning\n");
return 0;
}
params = devm_kzalloc(dai->dev, sizeof(*params), GFP_KERNEL);
if (params == NULL)
return -ENOMEM;
channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
channels->min = channels->max = substream->runtime->channels;
rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
rate->min = rate->max = substream->runtime->rate;
snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
SNDRV_PCM_HW_PARAM_FIRST_MASK],
substream->runtime->format);
dma_params = (struct skl_dma_params *)
snd_soc_dai_get_dma_data(codec_dai, substream);
if (dma_params)
format_val = dma_params->format;
dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d codec_dai_name=%s\n",
hdac_stream(link_dev)->stream_tag, format_val, codec_dai->name);
snd_hdac_ext_link_stream_reset(link_dev);
snd_hdac_ext_link_stream_setup(link_dev, format_val);
link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
if (!link)
return -EINVAL;
snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
link_dev->link_prepared = 1;
return 0;
}
static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
snd_hdac_ext_link_stream_start(link_dev);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
snd_hdac_ext_link_stream_clear(link_dev);
break;
default:
return -EINVAL;
}
return 0;
}
static int skl_link_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct hdac_ext_stream *link_dev =
snd_soc_dai_get_dma_data(dai, substream);
struct hdac_ext_link *link;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
link_dev->link_prepared = 0;
link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
if (!link)
return -EINVAL;
snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
return 0;
}
static int skl_hda_be_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
return pm_runtime_get_sync(dai->dev);
}
static void skl_hda_be_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
pm_runtime_mark_last_busy(dai->dev);
pm_runtime_put_autosuspend(dai->dev);
}
static struct snd_soc_dai_ops skl_pcm_dai_ops = {
.startup = skl_pcm_open,
.shutdown = skl_pcm_close,
.prepare = skl_pcm_prepare,
.hw_params = skl_pcm_hw_params,
.hw_free = skl_pcm_hw_free,
};
static struct snd_soc_dai_ops skl_dmic_dai_ops = {
.startup = skl_hda_be_startup,
.shutdown = skl_hda_be_shutdown,
};
static struct snd_soc_dai_ops skl_link_dai_ops = {
.startup = skl_hda_be_startup,
.prepare = skl_link_pcm_prepare,
.hw_params = skl_link_hw_params,
.hw_free = skl_link_hw_free,
.trigger = skl_link_pcm_trigger,
.shutdown = skl_hda_be_shutdown,
};
static struct snd_soc_dai_driver skl_platform_dai[] = {
{
.name = "System Pin",
.ops = &skl_pcm_dai_ops,
.playback = {
.stream_name = "System Playback",
.channels_min = HDA_MONO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
.capture = {
.stream_name = "System Capture",
.channels_min = HDA_MONO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
{
.name = "Reference Pin",
.ops = &skl_pcm_dai_ops,
.capture = {
.stream_name = "Reference Capture",
.channels_min = HDA_MONO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
{
.name = "Deepbuffer Pin",
.ops = &skl_pcm_dai_ops,
.playback = {
.stream_name = "Deepbuffer Playback",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
{
.name = "LowLatency Pin",
.ops = &skl_pcm_dai_ops,
.playback = {
.stream_name = "Low Latency Playback",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
/* BE CPU Dais */
{
.name = "iDisp Pin",
.ops = &skl_link_dai_ops,
.playback = {
.stream_name = "iDisp Tx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
.name = "DMIC01 Pin",
.ops = &skl_dmic_dai_ops,
.capture = {
.stream_name = "DMIC01 Rx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
{
.name = "DMIC23 Pin",
.ops = &skl_dmic_dai_ops,
.capture = {
.stream_name = "DMIC23 Rx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
},
{
.name = "HD-Codec Pin",
.ops = &skl_link_dai_ops,
.playback = {
.stream_name = "HD-Codec Tx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "HD-Codec Rx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
.name = "HD-Codec-SPK Pin",
.ops = &skl_link_dai_ops,
.playback = {
.stream_name = "HD-Codec-SPK Tx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
{
.name = "HD-Codec-AMIC Pin",
.ops = &skl_link_dai_ops,
.capture = {
.stream_name = "HD-Codec-AMIC Rx",
.channels_min = HDA_STEREO,
.channels_max = HDA_STEREO,
.rates = SNDRV_PCM_RATE_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
},
};
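/*
* Illustrative sketch only, not part of this driver: a machine driver
* would typically reference one of the front-end DAIs above by name in
* its DAI link table, roughly as below. The platform/codec names are
* hypothetical placeholders, and the block is kept out of the build.
*/
#if 0
static struct snd_soc_dai_link example_skl_fe_link = {
	.name = "Skl Audio Port",
	.stream_name = "Audio",
	.cpu_dai_name = "System Pin",
	.platform_name = "0000:00:1f.3",	/* hypothetical */
	.codec_name = "snd-soc-dummy",
	.codec_dai_name = "snd-soc-dummy-dai",
	.dynamic = 1,
	.dpcm_playback = 1,
	.dpcm_capture = 1,
};
#endif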
static int skl_platform_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
dai_link->cpu_dai_name);
runtime = substream->runtime;
snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
return 0;
}
static int skl_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct hdac_ext_stream *stream;
struct snd_pcm_substream *s;
bool start;
int sbits = 0;
unsigned long cookie;
struct hdac_stream *hstr;
stream = get_hdac_ext_stream(substream);
hstr = hdac_stream(stream);
dev_dbg(bus->dev, "In %s cmd=%d\n", __func__, cmd);
if (!hstr->prepared)
return -EPIPE;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
start = true;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
start = false;
break;
default:
return -EINVAL;
}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
stream = get_hdac_ext_stream(s);
sbits |= 1 << hdac_stream(stream)->index;
snd_pcm_trigger_done(s, substream);
}
spin_lock_irqsave(&bus->reg_lock, cookie);
/* first, set SYNC bits of corresponding streams */
snd_hdac_stream_sync_trigger(hstr, true, sbits, AZX_REG_SSYNC);
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
stream = get_hdac_ext_stream(s);
if (start)
snd_hdac_stream_start(hdac_stream(stream), true);
else
snd_hdac_stream_stop(hdac_stream(stream));
}
spin_unlock_irqrestore(&bus->reg_lock, cookie);
snd_hdac_stream_sync(hstr, start, sbits);
spin_lock_irqsave(&bus->reg_lock, cookie);
/* reset SYNC bits */
snd_hdac_stream_sync_trigger(hstr, false, sbits, AZX_REG_SSYNC);
if (start)
snd_hdac_stream_timecounter_init(hstr, sbits);
spin_unlock_irqrestore(&bus->reg_lock, cookie);
return 0;
}
static int skl_dsp_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct hdac_ext_stream *stream;
int start;
unsigned long cookie;
struct hdac_stream *hstr;
dev_dbg(bus->dev, "In %s cmd=%d streamname=%s\n", __func__, cmd, cpu_dai->name);
stream = get_hdac_ext_stream(substream);
hstr = hdac_stream(stream);
if (!hstr->prepared)
return -EPIPE;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
start = 1;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
start = 0;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&bus->reg_lock, cookie);
if (start)
snd_hdac_stream_start(hdac_stream(stream), true);
else
snd_hdac_stream_stop(hdac_stream(stream));
if (start)
snd_hdac_stream_timecounter_init(hstr, 0);
spin_unlock_irqrestore(&bus->reg_lock, cookie);
return 0;
}
static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
if (ebus->ppcap)
return skl_dsp_trigger(substream, cmd);
else
return skl_pcm_trigger(substream, cmd);
}
/* calculate runtime delay from LPIB */
static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
struct hdac_ext_stream *sstream,
unsigned int pos)
{
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct hdac_stream *hstream = hdac_stream(sstream);
struct snd_pcm_substream *substream = hstream->substream;
int stream = substream->stream;
unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
int delay;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
delay = pos - lpib_pos;
else
delay = lpib_pos - pos;
if (delay < 0) {
if (delay >= hstream->delay_negative_threshold)
delay = 0;
else
delay += hstream->bufsize;
}
if (delay >= hstream->period_bytes) {
dev_info(bus->dev,
"Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
delay, hstream->period_bytes);
delay = 0;
}
return bytes_to_frames(substream->runtime, delay);
}
static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
int codec_delay)
{
struct hdac_stream *hstr = hdac_stream(hstream);
struct snd_pcm_substream *substream = hstr->substream;
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
unsigned int pos;
int delay;
/* use the position buffer as default */
pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
if (pos >= hdac_stream(hstream)->bufsize)
pos = 0;
if (substream->runtime) {
delay = skl_get_delay_from_lpib(ebus, hstream, pos)
+ codec_delay;
substream->runtime->delay += delay;
}
return pos;
}
static snd_pcm_uframes_t skl_platform_pcm_pointer
(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
return bytes_to_frames(substream->runtime,
skl_get_position(hstream, 0));
}
static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
u64 nsec)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct snd_soc_dai *codec_dai = rtd->codec_dai;
u64 codec_frames, codec_nsecs;
if (!codec_dai->driver->ops->delay)
return nsec;
codec_frames = codec_dai->driver->ops->delay(substream, codec_dai);
codec_nsecs = div_u64(codec_frames * 1000000000LL,
substream->runtime->rate);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
return nsec + codec_nsecs;
return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
static int skl_get_time_info(struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
struct hdac_stream *hstr = hdac_stream(sstream);
u64 nsec;
if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
snd_pcm_gettime(substream->runtime, system_ts);
nsec = timecounter_read(&hstr->tc);
nsec = div_u64(nsec, 3); /* can be optimized */
if (audio_tstamp_config->report_delay)
nsec = skl_adjust_codec_delay(substream, nsec);
*audio_ts = ns_to_timespec(nsec);
audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */
} else {
audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
}
return 0;
}
static struct snd_pcm_ops skl_platform_ops = {
.open = skl_platform_open,
.ioctl = snd_pcm_lib_ioctl,
.trigger = skl_platform_pcm_trigger,
.pointer = skl_platform_pcm_pointer,
.get_time_info = skl_get_time_info,
.mmap = snd_pcm_lib_default_mmap,
.page = snd_pcm_sgbuf_ops_page,
};
static void skl_pcm_free(struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct snd_pcm *pcm = rtd->pcm;
unsigned int size;
int retval = 0;
struct skl *skl = ebus_to_skl(ebus);
if (dai->driver->playback.channels_min ||
dai->driver->capture.channels_min) {
/* buffer pre-allocation */
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(skl->pci),
size, MAX_PREALLOC_SIZE);
if (retval) {
dev_err(dai->dev, "dma buffer allocationf fail\n");
return retval;
}
}
return retval;
}
static struct snd_soc_platform_driver skl_platform_drv = {
.ops = &skl_platform_ops,
.pcm_new = skl_pcm_new,
.pcm_free = skl_pcm_free,
};
static const struct snd_soc_component_driver skl_component = {
.name = "pcm",
};
int skl_platform_register(struct device *dev)
{
int ret;
ret = snd_soc_register_platform(dev, &skl_platform_drv);
if (ret) {
dev_err(dev, "soc platform registration failed %d\n", ret);
return ret;
}
ret = snd_soc_register_component(dev, &skl_component,
skl_platform_dai,
ARRAY_SIZE(skl_platform_dai));
if (ret) {
dev_err(dev, "soc component registration failed %d\n", ret);
snd_soc_unregister_platform(dev);
}
return ret;
}
int skl_platform_unregister(struct device *dev)
{
snd_soc_unregister_component(dev);
snd_soc_unregister_platform(dev);
return 0;
}

View file

@@ -0,0 +1,327 @@
/*
* skl-sst-cldma.c - Code Loader DMA handler
*
* Copyright (C) 2015, Intel Corporation.
* Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}
void skl_cldma_int_disable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}
/* Code loader helper APIs */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
u32 **bdlp, int size, int with_ioc)
{
u32 *bdl = *bdlp;
ctx->cl_dev.frags = 0;
while (size > 0) {
phys_addr_t addr = virt_to_phys(dmab_data->area +
(ctx->cl_dev.frags * ctx->cl_dev.bufsize));
bdl[0] = cpu_to_le32(lower_32_bits(addr));
bdl[1] = cpu_to_le32(upper_32_bits(addr));
bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
size -= ctx->cl_dev.bufsize;
bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
bdl += 4;
ctx->cl_dev.frags++;
}
}
/*
* Setup controller
* Program the DMA buffer address into the registers and
* enable interrupts.
* Note: channel 1 is used for the transfer.
*/
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
u32 count)
{
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
CL_SD_BDLPLBA(dmab_bdl->addr));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
CL_SD_BDLPUBA(dmab_bdl->addr));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
unsigned int size, bool enable)
{
if (enable)
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
CL_SPBFIFO_SPBFCCTL_SPIBE(1));
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}
static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
CL_SPBFIFO_SPBFCCTL_SPIBE(0));
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}
static void skl_cldma_trigger(struct sst_dsp *ctx, bool enable)
{
if (enable)
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
else
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
}
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
skl_cldma_cleanup_spb(ctx);
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}
static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
int ret = 0;
if (!wait_event_timeout(ctx->cl_dev.wait_queue,
ctx->cl_dev.wait_condition,
msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
ret = -EIO;
goto cleanup;
}
dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
dev_err(ctx->dev, "%s: DMA Error\n", __func__);
ret = -EIO;
}
cleanup:
ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
return ret;
}
static void skl_cldma_stop(struct sst_dsp *ctx)
{
ctx->cl_dev.ops.cl_trigger(ctx, false);
}
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
const void *curr_pos, bool intr_enable, bool trigger)
{
dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
ctx->cl_dev.dma_buffer_offset, trigger);
dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
curr_pos, size);
if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
ctx->cl_dev.dma_buffer_offset = 0;
else
ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
ctx->cl_dev.wait_condition = false;
if (intr_enable)
skl_cldma_int_enable(ctx);
ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
if (trigger)
ctx->cl_dev.ops.cl_trigger(ctx, true);
}
/*
* The CL DMA has no way to report transfer status until a BDL
* buffer has been transferred completely.
*
* The copy is therefore split into two parts:
* 1. Interrupt on buffer completion, used while the data still to be
*    transferred is larger than the ring buffer.
* 2. Polling of the firmware register once the remaining data no longer
*    fills the ring buffer. The caller is responsible for polling the
*    required status register to determine when the transfer completes.
*/
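/*
* Worked example (illustrative, assuming 4 KiB pages so the ring buffer
* is SKL_MAX_BUFFER_SIZE = 32 * PAGE_SIZE = 128 KiB): copying a 300 KiB
* binary proceeds as
*   pass 1: 300 KiB left  > 128 KiB -> fill 128 KiB, wait for interrupt
*   pass 2: 172 KiB left  > 128 KiB -> fill 128 KiB, wait for interrupt
*   pass 3:  44 KiB left <= 128 KiB -> disable interrupt, fill 44 KiB,
*            caller polls the firmware status register for completion
*/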
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
int ret = 0;
bool start = true;
unsigned int excess_bytes;
u32 size;
unsigned int bytes_left = total_size;
const void *curr_pos = bin;
if (total_size <= 0)
return -EINVAL;
dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
while (bytes_left) {
if (bytes_left > ctx->cl_dev.bufsize) {
/*
* The DMA transfers only up to the write pointer
* programmed in SPIB.
*/
if (ctx->cl_dev.curr_spib_pos == 0)
ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
size = ctx->cl_dev.bufsize;
skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
start = false;
ret = skl_cldma_wait_interruptible(ctx);
if (ret < 0) {
skl_cldma_stop(ctx);
return ret;
}
} else {
skl_cldma_int_disable(ctx);
if ((ctx->cl_dev.curr_spib_pos + bytes_left)
<= ctx->cl_dev.bufsize) {
ctx->cl_dev.curr_spib_pos += bytes_left;
} else {
excess_bytes = bytes_left -
(ctx->cl_dev.bufsize -
ctx->cl_dev.curr_spib_pos);
ctx->cl_dev.curr_spib_pos = excess_bytes;
}
size = bytes_left;
skl_cldma_fill_buffer(ctx, size,
curr_pos, false, start);
}
bytes_left -= size;
curr_pos = curr_pos + size;
}
return ret;
}
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
u8 cl_dma_intr_status;
cl_dma_intr_status =
sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
else
ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
ctx->cl_dev.wait_condition = true;
wake_up(&ctx->cl_dev.wait_queue);
}
int skl_cldma_prepare(struct sst_dsp *ctx)
{
int ret;
u32 *bdl;
ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
/* Allocate cl ops */
ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
/* Allocate buffer*/
ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
if (ret < 0) {
dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
return ret;
}
/* Allocate buffer for the code loader BDL */
ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
if (ret < 0) {
dev_err(ctx->dev, "Alloc buffer for blde failed: %x", ret);
ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
return ret;
}
bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
/* Set up the BDL entries */
ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
&bdl, ctx->cl_dev.bufsize, 1);
ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
ctx->cl_dev.bufsize, ctx->cl_dev.frags);
ctx->cl_dev.curr_spib_pos = 0;
ctx->cl_dev.dma_buffer_offset = 0;
init_waitqueue_head(&ctx->cl_dev.wait_queue);
return ret;
}
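/*
* Illustrative sketch only (not wired into the driver): after
* skl_cldma_prepare() the ops set up above can be used to stream a
* firmware blob through the code loader DMA.  Error handling is
* reduced to a minimum here.
*/
#if 0
static int example_cldma_load(struct sst_dsp *ctx, const void *blob, u32 size)
{
	int ret;

	ret = skl_cldma_prepare(ctx);
	if (ret < 0)
		return ret;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, blob, size);
	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}
#endif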

View file

@@ -0,0 +1,251 @@
/*
* Intel Code Loader DMA support
*
* Copyright (C) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef SKL_SST_CLDMA_H_
#define SKL_SST_CLDMA_H_
#define FW_CL_STREAM_NUMBER 0x1
#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
#define BDL_ALIGN(x) (x >> DMA_ADDRESS_128_BITS_ALIGNMENT)
#define SKL_ADSPIC_CL_DMA 0x2
#define SKL_ADSPIS_CL_DMA 0x2
#define SKL_CL_DMA_SD_INT_DESC_ERR 0x10 /* Descriptor error interrupt */
#define SKL_CL_DMA_SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
#define SKL_CL_DMA_SD_INT_COMPLETE 0x04 /* Buffer completion interrupt */
/* Intel HD Audio Code Loader DMA Registers */
#define HDA_ADSP_LOADER_BASE 0x80
/* Stream Registers */
#define SKL_ADSP_REG_CL_SD_CTL (HDA_ADSP_LOADER_BASE + 0x00)
#define SKL_ADSP_REG_CL_SD_STS (HDA_ADSP_LOADER_BASE + 0x03)
#define SKL_ADSP_REG_CL_SD_LPIB (HDA_ADSP_LOADER_BASE + 0x04)
#define SKL_ADSP_REG_CL_SD_CBL (HDA_ADSP_LOADER_BASE + 0x08)
#define SKL_ADSP_REG_CL_SD_LVI (HDA_ADSP_LOADER_BASE + 0x0c)
#define SKL_ADSP_REG_CL_SD_FIFOW (HDA_ADSP_LOADER_BASE + 0x0e)
#define SKL_ADSP_REG_CL_SD_FIFOSIZE (HDA_ADSP_LOADER_BASE + 0x10)
#define SKL_ADSP_REG_CL_SD_FORMAT (HDA_ADSP_LOADER_BASE + 0x12)
#define SKL_ADSP_REG_CL_SD_FIFOL (HDA_ADSP_LOADER_BASE + 0x14)
#define SKL_ADSP_REG_CL_SD_BDLPL (HDA_ADSP_LOADER_BASE + 0x18)
#define SKL_ADSP_REG_CL_SD_BDLPU (HDA_ADSP_LOADER_BASE + 0x1c)
/* CL: Software Position Based FIFO Capability Registers */
#define SKL_ADSP_REG_CL_SPBFIFO (HDA_ADSP_LOADER_BASE + 0x20)
#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH (SKL_ADSP_REG_CL_SPBFIFO + 0x0)
#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL (SKL_ADSP_REG_CL_SPBFIFO + 0x4)
#define SKL_ADSP_REG_CL_SPBFIFO_SPIB (SKL_ADSP_REG_CL_SPBFIFO + 0x8)
#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS (SKL_ADSP_REG_CL_SPBFIFO + 0xc)
/* CL: Stream Descriptor x Control */
/* Stream Reset */
#define CL_SD_CTL_SRST_SHIFT 0
#define CL_SD_CTL_SRST_MASK (1 << CL_SD_CTL_SRST_SHIFT)
#define CL_SD_CTL_SRST(x) \
((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
/* Stream Run */
#define CL_SD_CTL_RUN_SHIFT 1
#define CL_SD_CTL_RUN_MASK (1 << CL_SD_CTL_RUN_SHIFT)
#define CL_SD_CTL_RUN(x) \
((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
/* Interrupt On Completion Enable */
#define CL_SD_CTL_IOCE_SHIFT 2
#define CL_SD_CTL_IOCE_MASK (1 << CL_SD_CTL_IOCE_SHIFT)
#define CL_SD_CTL_IOCE(x) \
((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
/* FIFO Error Interrupt Enable */
#define CL_SD_CTL_FEIE_SHIFT 3
#define CL_SD_CTL_FEIE_MASK (1 << CL_SD_CTL_FEIE_SHIFT)
#define CL_SD_CTL_FEIE(x) \
((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
/* Descriptor Error Interrupt Enable */
#define CL_SD_CTL_DEIE_SHIFT 4
#define CL_SD_CTL_DEIE_MASK (1 << CL_SD_CTL_DEIE_SHIFT)
#define CL_SD_CTL_DEIE(x) \
((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
/* FIFO Limit Change */
#define CL_SD_CTL_FIFOLC_SHIFT 5
#define CL_SD_CTL_FIFOLC_MASK (1 << CL_SD_CTL_FIFOLC_SHIFT)
#define CL_SD_CTL_FIFOLC(x) \
((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
/* Stripe Control */
#define CL_SD_CTL_STRIPE_SHIFT 16
#define CL_SD_CTL_STRIPE_MASK (0x3 << CL_SD_CTL_STRIPE_SHIFT)
#define CL_SD_CTL_STRIPE(x) \
((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
/* Traffic Priority */
#define CL_SD_CTL_TP_SHIFT 18
#define CL_SD_CTL_TP_MASK (1 << CL_SD_CTL_TP_SHIFT)
#define CL_SD_CTL_TP(x) \
((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
/* Bidirectional Direction Control */
#define CL_SD_CTL_DIR_SHIFT 19
#define CL_SD_CTL_DIR_MASK (1 << CL_SD_CTL_DIR_SHIFT)
#define CL_SD_CTL_DIR(x) \
((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
/* Stream Number */
#define CL_SD_CTL_STRM_SHIFT 20
#define CL_SD_CTL_STRM_MASK (0xf << CL_SD_CTL_STRM_SHIFT)
#define CL_SD_CTL_STRM(x) \
((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
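/*
* Example (for illustration): with FW_CL_STREAM_NUMBER = 0x1,
* CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER) evaluates to
* (0x1 << 20) & (0xf << 20) = 0x00100000, i.e. stream number 1 in
* bits 23:20 of the CL_SD_CTL register.
*/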
/* CL: Stream Descriptor x Status */
/* Buffer Completion Interrupt Status */
#define CL_SD_STS_BCIS(x) CL_SD_CTL_IOCE(x)
/* FIFO Error */
#define CL_SD_STS_FIFOE(x) CL_SD_CTL_FEIE(x)
/* Descriptor Error */
#define CL_SD_STS_DESE(x) CL_SD_CTL_DEIE(x)
/* FIFO Ready */
#define CL_SD_STS_FIFORDY(x) CL_SD_CTL_FIFOLC(x)
/* CL: Stream Descriptor x Last Valid Index */
#define CL_SD_LVI_SHIFT 0
#define CL_SD_LVI_MASK (0xff << CL_SD_LVI_SHIFT)
#define CL_SD_LVI(x) ((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
/* CL: Stream Descriptor x FIFO Eviction Watermark */
#define CL_SD_FIFOW_SHIFT 0
#define CL_SD_FIFOW_MASK (0x7 << CL_SD_FIFOW_SHIFT)
#define CL_SD_FIFOW(x) \
((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
/* Protect Bits */
#define CL_SD_BDLPLBA_PROT_SHIFT 0
#define CL_SD_BDLPLBA_PROT_MASK (1 << CL_SD_BDLPLBA_PROT_SHIFT)
#define CL_SD_BDLPLBA_PROT(x) \
((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
/* Buffer Descriptor List Lower Base Address */
#define CL_SD_BDLPLBA_SHIFT 7
#define CL_SD_BDLPLBA_MASK (0x1ffffff << CL_SD_BDLPLBA_SHIFT)
#define CL_SD_BDLPLBA(x) \
((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
/* Buffer Descriptor List Upper Base Address */
#define CL_SD_BDLPUBA_SHIFT 0
#define CL_SD_BDLPUBA_MASK (0xffffffff << CL_SD_BDLPUBA_SHIFT)
#define CL_SD_BDLPUBA(x) \
((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
/*
* Code Loader - Software Position Based FIFO
* Capability Registers x Software Position Based FIFO Header
*/
/* Next Capability Pointer */
#define CL_SPBFIFO_SPBFCH_PTR_SHIFT 0
#define CL_SPBFIFO_SPBFCH_PTR_MASK (0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
#define CL_SPBFIFO_SPBFCH_PTR(x) \
((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
/* Capability Identifier */
#define CL_SPBFIFO_SPBFCH_ID_SHIFT 16
#define CL_SPBFIFO_SPBFCH_ID_MASK (0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
#define CL_SPBFIFO_SPBFCH_ID(x) \
((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
/* Capability Version */
#define CL_SPBFIFO_SPBFCH_VER_SHIFT 28
#define CL_SPBFIFO_SPBFCH_VER_MASK (0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
#define CL_SPBFIFO_SPBFCH_VER(x) \
((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
/* Software Position in Buffer Enable */
#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK (1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
#define CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
/* SST IPC SKL defines */
#define SKL_WAIT_TIMEOUT 500 /* 500 msec */
#define SKL_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
enum skl_cl_dma_wake_states {
SKL_CL_DMA_STATUS_NONE = 0,
SKL_CL_DMA_BUF_COMPLETE,
SKL_CL_DMA_ERR, /* TODO: Expand the error states */
};
struct sst_dsp;
struct skl_cl_dev_ops {
void (*cl_setup_bdle)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_data,
u32 **bdlp, int size, int with_ioc);
void (*cl_setup_controller)(struct sst_dsp *ctx,
struct snd_dma_buffer *dmab_bdl,
unsigned int max_size, u32 page_count);
void (*cl_setup_spb)(struct sst_dsp *ctx,
unsigned int size, bool enable);
void (*cl_cleanup_spb)(struct sst_dsp *ctx);
void (*cl_trigger)(struct sst_dsp *ctx, bool enable);
void (*cl_cleanup_controller)(struct sst_dsp *ctx);
int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
const void *bin, u32 size);
void (*cl_stop_dma)(struct sst_dsp *ctx);
};
/**
* skl_cl_dev - holds information for code loader dma transfer
*
* @dmab_data: buffer pointer
* @dmab_bdl: buffer descriptor list
* @bufsize: ring buffer size
* @frags: Last valid buffer descriptor index in the BDL
* @curr_spib_pos: Current position in ring buffer
* @dma_buffer_offset: dma buffer offset
* @ops: operations supported on CL dma
* @wait_queue: wait queue woken when a CL DMA buffer completes or errors
* @wake_status: DMA wake status
* @wait_condition: condition to wait on wait queue
*/
struct skl_cl_dev {
struct snd_dma_buffer dmab_data;
struct snd_dma_buffer dmab_bdl;
unsigned int bufsize;
unsigned int frags;
unsigned int curr_spib_pos;
unsigned int dma_buffer_offset;
struct skl_cl_dev_ops ops;
wait_queue_head_t wait_queue;
int wake_status;
bool wait_condition;
};
#endif /* SKL_SST_CLDMA_H_ */

View file

@@ -0,0 +1,342 @@
/*
* skl-sst-dsp.c - SKL SST library generic function
*
* Copyright (C) 2014-15, Intel Corporation.
* Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <sound/pcm.h>
#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl-sst-ipc.h"
/* various timeout values */
#define SKL_DSP_PU_TO 50
#define SKL_DSP_PD_TO 50
#define SKL_DSP_RESET_TO 50
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
mutex_lock(&ctx->mutex);
ctx->sst_state = state;
mutex_unlock(&ctx->mutex);
}
static int skl_dsp_core_set_reset_state(struct sst_dsp *ctx)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx,
SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK,
SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
SKL_DSP_RESET_TO,
"Set reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
dev_err(ctx->dev, "Set reset state failed\n");
ret = -EIO;
}
return ret;
}
static int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx)
{
int ret;
dev_dbg(ctx->dev, "In %s\n", __func__);
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK, 0);
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CRST_MASK,
0,
SKL_DSP_RESET_TO,
"Unset reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
dev_err(ctx->dev, "Unset reset state failed\n");
ret = -EIO;
}
return ret;
}
static bool is_skl_dsp_core_enable(struct sst_dsp *ctx)
{
int val;
bool is_enable;
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
(val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
!(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
!(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
return is_enable;
}
static int skl_dsp_reset_core(struct sst_dsp *ctx)
{
/* stall core */
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
/* set reset state */
return skl_dsp_core_set_reset_state(ctx);
}
static int skl_dsp_start_core(struct sst_dsp *ctx)
{
int ret;
/* unset reset state */
ret = skl_dsp_core_unset_reset_state(ctx);
if (ret < 0) {
dev_dbg(ctx->dev, "dsp unset reset fails\n");
return ret;
}
/* run core */
dev_dbg(ctx->dev, "run core...\n");
sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
if (!is_skl_dsp_core_enable(ctx)) {
skl_dsp_reset_core(ctx);
dev_err(ctx->dev, "DSP core enable failed\n");
ret = -EIO;
}
return ret;
}
static int skl_dsp_core_power_up(struct sst_dsp *ctx)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_CPA_MASK,
SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
SKL_DSP_PU_TO,
"Power up");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
dev_err(ctx->dev, "DSP core power up failed\n");
ret = -EIO;
}
return ret;
}
static int skl_dsp_core_power_down(struct sst_dsp *ctx)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_SPA_MASK, 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
SKL_ADSPCS_SPA_MASK,
0,
SKL_DSP_PD_TO,
"Power down");
}
static int skl_dsp_enable_core(struct sst_dsp *ctx)
{
int ret;
/* power up */
ret = skl_dsp_core_power_up(ctx);
if (ret < 0) {
dev_dbg(ctx->dev, "dsp core power up failed\n");
return ret;
}
return skl_dsp_start_core(ctx);
}
int skl_dsp_disable_core(struct sst_dsp *ctx)
{
int ret;
ret = skl_dsp_reset_core(ctx);
if (ret < 0) {
dev_err(ctx->dev, "dsp core reset failed\n");
return ret;
}
/* power down core*/
ret = skl_dsp_core_power_down(ctx);
if (ret < 0) {
dev_err(ctx->dev, "dsp core power down failed\n");
return ret;
}
if (is_skl_dsp_core_enable(ctx)) {
dev_err(ctx->dev, "DSP core disable failed\n");
ret = -EIO;
}
return ret;
}
int skl_dsp_boot(struct sst_dsp *ctx)
{
int ret;
if (is_skl_dsp_core_enable(ctx)) {
dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dap core\n");
ret = skl_dsp_reset_core(ctx);
if (ret < 0) {
dev_err(ctx->dev, "dsp reset failed\n");
return ret;
}
ret = skl_dsp_start_core(ctx);
if (ret < 0) {
dev_err(ctx->dev, "dsp start failed\n");
return ret;
}
} else {
dev_dbg(ctx->dev, "disable and enable to make sure DSP is invalid state\n");
ret = skl_dsp_disable_core(ctx);
if (ret < 0) {
dev_err(ctx->dev, "dsp disable core failes\n");
return ret;
}
ret = skl_dsp_enable_core(ctx);
}
return ret;
}
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
struct sst_dsp *ctx = dev_id;
u32 val;
irqreturn_t result = IRQ_NONE;
spin_lock(&ctx->spinlock);
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
ctx->intr_status = val;
if (val & SKL_ADSPIS_IPC) {
skl_ipc_int_disable(ctx);
result = IRQ_WAKE_THREAD;
}
if (val & SKL_ADSPIS_CL_DMA) {
skl_cldma_int_disable(ctx);
result = IRQ_WAKE_THREAD;
}
spin_unlock(&ctx->spinlock);
return result;
}
int skl_dsp_wake(struct sst_dsp *ctx)
{
return ctx->fw_ops.set_state_D0(ctx);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);
int skl_dsp_sleep(struct sst_dsp *ctx)
{
return ctx->fw_ops.set_state_D3(ctx);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq)
{
int ret;
struct sst_dsp *sst;
sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
if (sst == NULL)
return NULL;
spin_lock_init(&sst->spinlock);
mutex_init(&sst->mutex);
sst->dev = dev;
sst->sst_dev = sst_dev;
sst->irq = irq;
sst->ops = sst_dev->ops;
sst->thread_context = sst_dev->thread_context;
/* Initialise SST Audio DSP */
if (sst->ops->init) {
ret = sst->ops->init(sst, NULL);
if (ret < 0)
return NULL;
}
/* Register the ISR */
ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
if (ret) {
dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
sst->irq);
return NULL;
}
return sst;
}
void skl_dsp_free(struct sst_dsp *dsp)
{
skl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
skl_dsp_disable_core(dsp);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
bool is_skl_dsp_running(struct sst_dsp *ctx)
{
return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);

View file

@@ -0,0 +1,145 @@
/*
* Skylake SST DSP Support
*
* Copyright (C) 2014-15, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __SKL_SST_DSP_H__
#define __SKL_SST_DSP_H__
#include <linux/interrupt.h>
#include <sound/memalloc.h>
#include "skl-sst-cldma.h"
struct sst_dsp;
struct skl_sst;
struct sst_dsp_device;
/* Intel HD Audio General DSP Registers */
#define SKL_ADSP_GEN_BASE 0x0
#define SKL_ADSP_REG_ADSPCS (SKL_ADSP_GEN_BASE + 0x04)
#define SKL_ADSP_REG_ADSPIC (SKL_ADSP_GEN_BASE + 0x08)
#define SKL_ADSP_REG_ADSPIS (SKL_ADSP_GEN_BASE + 0x0C)
#define SKL_ADSP_REG_ADSPIC2 (SKL_ADSP_GEN_BASE + 0x10)
#define SKL_ADSP_REG_ADSPIS2 (SKL_ADSP_GEN_BASE + 0x14)
/* Intel HD Audio Inter-Processor Communication Registers */
#define SKL_ADSP_IPC_BASE 0x40
#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00)
#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04)
#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08)
#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C)
#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10)
/* HIPCI */
#define SKL_ADSP_REG_HIPCI_BUSY BIT(31)
/* HIPCIE */
#define SKL_ADSP_REG_HIPCIE_DONE BIT(30)
/* HIPCCTL */
#define SKL_ADSP_REG_HIPCCTL_DONE BIT(1)
#define SKL_ADSP_REG_HIPCCTL_BUSY BIT(0)
/* HIPCT */
#define SKL_ADSP_REG_HIPCT_BUSY BIT(31)
/* Intel HD Audio SRAM Window 1 */
#define SKL_ADSP_SRAM1_BASE 0xA000
#define SKL_ADSP_MMIO_LEN 0x10000
#define SKL_ADSP_W0_STAT_SZ 0x800
#define SKL_ADSP_W0_UP_SZ 0x800
#define SKL_ADSP_W1_SZ 0x1000
#define SKL_FW_STS_MASK 0xf
#define SKL_FW_INIT 0x1
#define SKL_FW_RFW_START 0xf
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1
/* ADSPCS - Audio DSP Control & Status */
#define SKL_DSP_CORES 1
#define SKL_DSP_CORE0_MASK 1
#define SKL_DSP_CORES_MASK ((1 << SKL_DSP_CORES) - 1)
/* Core Reset - asserted high */
#define SKL_ADSPCS_CRST_SHIFT 0
#define SKL_ADSPCS_CRST_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
#define SKL_ADSPCS_CRST(x) ((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
/* Core run/stall - when set to '1' core is stalled */
#define SKL_ADSPCS_CSTALL_SHIFT 8
#define SKL_ADSPCS_CSTALL_MASK (SKL_DSP_CORES_MASK << \
SKL_ADSPCS_CSTALL_SHIFT)
#define SKL_ADSPCS_CSTALL(x) ((x << SKL_ADSPCS_CSTALL_SHIFT) & \
SKL_ADSPCS_CSTALL_MASK)
/* Set Power Active - when set to '1' turn cores on */
#define SKL_ADSPCS_SPA_SHIFT 16
#define SKL_ADSPCS_SPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
#define SKL_ADSPCS_SPA(x) ((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
/* Current Power Active - power status of cores, set by hardware */
#define SKL_ADSPCS_CPA_SHIFT 24
#define SKL_ADSPCS_CPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
#define SKL_ADSPCS_CPA(x) ((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
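/*
* Example (for illustration): with a single core, SKL_DSP_CORES_MASK is
* 0x1, so the per-field values used by the core power-up sequence are
*   SKL_ADSPCS_CRST(0x1)   = 0x00000001   (core reset)
*   SKL_ADSPCS_CSTALL(0x1) = 0x00000100   (core stall)
*   SKL_ADSPCS_SPA(0x1)    = 0x00010000   (set power active)
*   SKL_ADSPCS_CPA(0x1)    = 0x01000000   (current power active, read back)
*/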
#define SST_DSP_POWER_D0 0x0 /* full On */
#define SST_DSP_POWER_D3 0x3 /* Off */
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
SKL_DSP_RESET,
};
struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
int (*parse_fw)(struct sst_dsp *ctx);
int (*set_state_D0)(struct sst_dsp *ctx);
int (*set_state_D3)(struct sst_dsp *ctx);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
};
struct skl_dsp_loader_ops {
int (*alloc_dma_buf)(struct device *dev,
struct snd_dma_buffer *dmab, size_t size);
int (*free_dma_buf)(struct device *dev,
struct snd_dma_buffer *dmab);
};
void skl_cldma_process_intr(struct sst_dsp *ctx);
void skl_cldma_int_disable(struct sst_dsp *ctx);
int skl_cldma_prepare(struct sst_dsp *ctx);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
int skl_dsp_disable_core(struct sst_dsp *ctx);
bool is_skl_dsp_running(struct sst_dsp *ctx);
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
int skl_dsp_wake(struct sst_dsp *ctx);
int skl_dsp_sleep(struct sst_dsp *ctx);
void skl_dsp_free(struct sst_dsp *dsp);
int skl_dsp_boot(struct sst_dsp *ctx);
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
#endif /*__SKL_SST_DSP_H__*/

View file

@@ -0,0 +1,771 @@
/*
* skl-sst-ipc.c - Intel skl IPC Support
*
* Copyright (C) 2014-15, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#define IPC_IXC_STATUS_BITS 24
/* Global Message - Generic */
#define IPC_GLB_TYPE_SHIFT 24
#define IPC_GLB_TYPE_MASK (0xf << IPC_GLB_TYPE_SHIFT)
#define IPC_GLB_TYPE(x) ((x) << IPC_GLB_TYPE_SHIFT)
/* Global Message - Reply */
#define IPC_GLB_REPLY_STATUS_SHIFT 24
#define IPC_GLB_REPLY_STATUS_MASK ((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
#define IPC_GLB_REPLY_STATUS(x) ((x) << IPC_GLB_REPLY_STATUS_SHIFT)
#define IPC_TIMEOUT_MSECS 3000
#define IPC_EMPTY_LIST_SIZE 8
#define IPC_MSG_TARGET_SHIFT 30
#define IPC_MSG_TARGET_MASK 0x1
#define IPC_MSG_TARGET(x) (((x) & IPC_MSG_TARGET_MASK) \
<< IPC_MSG_TARGET_SHIFT)
#define IPC_MSG_DIR_SHIFT 29
#define IPC_MSG_DIR_MASK 0x1
#define IPC_MSG_DIR(x) (((x) & IPC_MSG_DIR_MASK) \
<< IPC_MSG_DIR_SHIFT)
/* Global Notification Message */
#define IPC_GLB_NOTIFY_TYPE_SHIFT 16
#define IPC_GLB_NOTIFY_TYPE_MASK 0xFF
#define IPC_GLB_NOTIFY_TYPE(x) (((x) >> IPC_GLB_NOTIFY_TYPE_SHIFT) \
& IPC_GLB_NOTIFY_TYPE_MASK)
#define IPC_GLB_NOTIFY_MSG_TYPE_SHIFT 24
#define IPC_GLB_NOTIFY_MSG_TYPE_MASK 0x1F
#define IPC_GLB_NOTIFY_MSG_TYPE(x) (((x) >> IPC_GLB_NOTIFY_MSG_TYPE_SHIFT) \
& IPC_GLB_NOTIFY_MSG_TYPE_MASK)
#define IPC_GLB_NOTIFY_RSP_SHIFT 29
#define IPC_GLB_NOTIFY_RSP_MASK 0x1
#define IPC_GLB_NOTIFY_RSP_TYPE(x) (((x) >> IPC_GLB_NOTIFY_RSP_SHIFT) \
& IPC_GLB_NOTIFY_RSP_MASK)
/* Pipeline operations */
/* Create pipeline message */
#define IPC_PPL_MEM_SIZE_SHIFT 0
#define IPC_PPL_MEM_SIZE_MASK 0x7FF
#define IPC_PPL_MEM_SIZE(x) (((x) & IPC_PPL_MEM_SIZE_MASK) \
<< IPC_PPL_MEM_SIZE_SHIFT)
#define IPC_PPL_TYPE_SHIFT 11
#define IPC_PPL_TYPE_MASK 0x1F
#define IPC_PPL_TYPE(x) (((x) & IPC_PPL_TYPE_MASK) \
<< IPC_PPL_TYPE_SHIFT)
#define IPC_INSTANCE_ID_SHIFT 16
#define IPC_INSTANCE_ID_MASK 0xFF
#define IPC_INSTANCE_ID(x) (((x) & IPC_INSTANCE_ID_MASK) \
<< IPC_INSTANCE_ID_SHIFT)
/* Set pipeline state message */
#define IPC_PPL_STATE_SHIFT 0
#define IPC_PPL_STATE_MASK 0x1F
#define IPC_PPL_STATE(x) (((x) & IPC_PPL_STATE_MASK) \
<< IPC_PPL_STATE_SHIFT)
/* Module operations primary register */
#define IPC_MOD_ID_SHIFT 0
#define IPC_MOD_ID_MASK 0xFFFF
#define IPC_MOD_ID(x) (((x) & IPC_MOD_ID_MASK) \
<< IPC_MOD_ID_SHIFT)
#define IPC_MOD_INSTANCE_ID_SHIFT 16
#define IPC_MOD_INSTANCE_ID_MASK 0xFF
#define IPC_MOD_INSTANCE_ID(x) (((x) & IPC_MOD_INSTANCE_ID_MASK) \
<< IPC_MOD_INSTANCE_ID_SHIFT)
/* Init instance message extension register */
#define IPC_PARAM_BLOCK_SIZE_SHIFT 0
#define IPC_PARAM_BLOCK_SIZE_MASK 0xFFFF
#define IPC_PARAM_BLOCK_SIZE(x) (((x) & IPC_PARAM_BLOCK_SIZE_MASK) \
<< IPC_PARAM_BLOCK_SIZE_SHIFT)
#define IPC_PPL_INSTANCE_ID_SHIFT 16
#define IPC_PPL_INSTANCE_ID_MASK 0xFF
#define IPC_PPL_INSTANCE_ID(x) (((x) & IPC_PPL_INSTANCE_ID_MASK) \
<< IPC_PPL_INSTANCE_ID_SHIFT)
#define IPC_CORE_ID_SHIFT 24
#define IPC_CORE_ID_MASK 0x1F
#define IPC_CORE_ID(x) (((x) & IPC_CORE_ID_MASK) \
<< IPC_CORE_ID_SHIFT)
/* Bind/Unbind message extension register */
#define IPC_DST_MOD_ID_SHIFT 0
#define IPC_DST_MOD_ID(x) (((x) & IPC_MOD_ID_MASK) \
<< IPC_DST_MOD_ID_SHIFT)
#define IPC_DST_MOD_INSTANCE_ID_SHIFT 16
#define IPC_DST_MOD_INSTANCE_ID(x) (((x) & IPC_MOD_INSTANCE_ID_MASK) \
<< IPC_DST_MOD_INSTANCE_ID_SHIFT)
#define IPC_DST_QUEUE_SHIFT 24
#define IPC_DST_QUEUE_MASK 0x7
#define IPC_DST_QUEUE(x) (((x) & IPC_DST_QUEUE_MASK) \
<< IPC_DST_QUEUE_SHIFT)
#define IPC_SRC_QUEUE_SHIFT 27
#define IPC_SRC_QUEUE_MASK 0x7
#define IPC_SRC_QUEUE(x) (((x) & IPC_SRC_QUEUE_MASK) \
<< IPC_SRC_QUEUE_SHIFT)
/* Save pipeline message extension register */
#define IPC_DMA_ID_SHIFT 0
#define IPC_DMA_ID_MASK 0x1F
#define IPC_DMA_ID(x) (((x) & IPC_DMA_ID_MASK) \
<< IPC_DMA_ID_SHIFT)
/* Large Config message extension register */
#define IPC_DATA_OFFSET_SZ_SHIFT 0
#define IPC_DATA_OFFSET_SZ_MASK 0xFFFFF
#define IPC_DATA_OFFSET_SZ(x) (((x) & IPC_DATA_OFFSET_SZ_MASK) \
<< IPC_DATA_OFFSET_SZ_SHIFT)
#define IPC_DATA_OFFSET_SZ_CLEAR ~(IPC_DATA_OFFSET_SZ_MASK \
<< IPC_DATA_OFFSET_SZ_SHIFT)
#define IPC_LARGE_PARAM_ID_SHIFT 20
#define IPC_LARGE_PARAM_ID_MASK 0xFF
#define IPC_LARGE_PARAM_ID(x) (((x) & IPC_LARGE_PARAM_ID_MASK) \
<< IPC_LARGE_PARAM_ID_SHIFT)
#define IPC_FINAL_BLOCK_SHIFT 28
#define IPC_FINAL_BLOCK_MASK 0x1
#define IPC_FINAL_BLOCK(x) (((x) & IPC_FINAL_BLOCK_MASK) \
<< IPC_FINAL_BLOCK_SHIFT)
#define IPC_INITIAL_BLOCK_SHIFT 29
#define IPC_INITIAL_BLOCK_MASK 0x1
#define IPC_INITIAL_BLOCK(x) (((x) & IPC_INITIAL_BLOCK_MASK) \
<< IPC_INITIAL_BLOCK_SHIFT)
#define IPC_INITIAL_BLOCK_CLEAR ~(IPC_INITIAL_BLOCK_MASK \
<< IPC_INITIAL_BLOCK_SHIFT)
enum skl_ipc_msg_target {
IPC_FW_GEN_MSG = 0,
IPC_MOD_MSG = 1
};
enum skl_ipc_msg_direction {
IPC_MSG_REQUEST = 0,
IPC_MSG_REPLY = 1
};
/* Global Message Types */
enum skl_ipc_glb_type {
IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
IPC_GLB_LOAD_MULTIPLE_MODS = 15,
IPC_GLB_UNLOAD_MULTIPLE_MODS = 16,
IPC_GLB_CREATE_PPL = 17,
IPC_GLB_DELETE_PPL = 18,
IPC_GLB_SET_PPL_STATE = 19,
IPC_GLB_GET_PPL_STATE = 20,
IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
IPC_GLB_SAVE_PPL = 22,
IPC_GLB_RESTORE_PPL = 23,
IPC_GLB_NOTIFY = 26,
IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
};
enum skl_ipc_glb_reply {
IPC_GLB_REPLY_SUCCESS = 0,
IPC_GLB_REPLY_UNKNOWN_MSG_TYPE = 1,
IPC_GLB_REPLY_ERROR_INVALID_PARAM = 2,
IPC_GLB_REPLY_BUSY = 3,
IPC_GLB_REPLY_PENDING = 4,
IPC_GLB_REPLY_FAILURE = 5,
IPC_GLB_REPLY_INVALID_REQUEST = 6,
IPC_GLB_REPLY_OUT_OF_MEMORY = 7,
IPC_GLB_REPLY_OUT_OF_MIPS = 8,
IPC_GLB_REPLY_INVALID_RESOURCE_ID = 9,
IPC_GLB_REPLY_INVALID_RESOURCE_STATE = 10,
IPC_GLB_REPLY_MOD_MGMT_ERROR = 100,
IPC_GLB_REPLY_MOD_LOAD_CL_FAILED = 101,
IPC_GLB_REPLY_MOD_LOAD_INVALID_HASH = 102,
IPC_GLB_REPLY_MOD_UNLOAD_INST_EXIST = 103,
IPC_GLB_REPLY_MOD_NOT_INITIALIZED = 104,
IPC_GLB_REPLY_INVALID_CONFIG_PARAM_ID = 120,
IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
IPC_GLB_REPLY_PPL_SAVE_FAILED = 162,
IPC_GLB_REPLY_PPL_RESTORE_FAILED = 163,
IPC_MAX_STATUS = ((1<<IPC_IXC_STATUS_BITS)-1)
};
enum skl_ipc_notification_type {
IPC_GLB_NOTIFY_GLITCH = 0,
IPC_GLB_NOTIFY_OVERRUN = 1,
IPC_GLB_NOTIFY_UNDERRUN = 2,
IPC_GLB_NOTIFY_END_STREAM = 3,
IPC_GLB_NOTIFY_PHRASE_DETECTED = 4,
IPC_GLB_NOTIFY_RESOURCE_EVENT = 5,
IPC_GLB_NOTIFY_LOG_BUFFER_STATUS = 6,
IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED = 7,
IPC_GLB_NOTIFY_FW_READY = 8
};
/* Module Message Types */
enum skl_ipc_module_msg {
IPC_MOD_INIT_INSTANCE = 0,
IPC_MOD_CONFIG_GET = 1,
IPC_MOD_CONFIG_SET = 2,
IPC_MOD_LARGE_CONFIG_GET = 3,
IPC_MOD_LARGE_CONFIG_SET = 4,
IPC_MOD_BIND = 5,
IPC_MOD_UNBIND = 6,
IPC_MOD_SET_DX = 7
};
static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
size_t tx_size)
{
if (tx_size)
memcpy(msg->tx_data, tx_data, tx_size);
}
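/*
 * The DSP is considered busy as long as the primary word last written to
 * HIPCI still has its BUSY bit set.
 */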
static bool skl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
u32 hipci;
hipci = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCI);
return (hipci & SKL_ADSP_REG_HIPCI_BUSY);
}
/* Lock to be held by caller */
static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
if (msg->tx_size)
sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
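/*
 * Write the extension word first; the primary write with the BUSY bit
 * set is what actually kicks the DSP.
 */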
sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCIE,
header->extension);
sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCI,
header->primary | SKL_ADSP_REG_HIPCI_BUSY);
}
static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
u64 ipc_header)
{
struct ipc_message *msg = NULL;
struct skl_ipc_header *header = (struct skl_ipc_header *)(&ipc_header);
if (list_empty(&ipc->rx_list)) {
dev_err(ipc->dev, "ipc: rx list is empty but received 0x%x\n",
header->primary);
goto out;
}
msg = list_first_entry(&ipc->rx_list, struct ipc_message, list);
out:
return msg;
}
static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
struct skl_ipc_header header)
{
struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
if (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
switch (IPC_GLB_NOTIFY_TYPE(header.primary)) {
case IPC_GLB_NOTIFY_UNDERRUN:
dev_err(ipc->dev, "FW Underrun %x\n", header.primary);
break;
case IPC_GLB_NOTIFY_RESOURCE_EVENT:
dev_err(ipc->dev, "MCPS Budget Violation: %x\n",
header.primary);
break;
case IPC_GLB_NOTIFY_FW_READY:
skl->boot_complete = true;
wake_up(&skl->boot_wait);
break;
default:
dev_err(ipc->dev, "ipc: Unhandled error msg=%x",
header.primary);
break;
}
}
return 0;
}
static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
struct skl_ipc_header header)
{
struct ipc_message *msg;
u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
u64 *ipc_header = (u64 *)(&header);
msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
if (msg == NULL) {
dev_dbg(ipc->dev, "ipc: rx list is empty\n");
return;
}
/* first process the header */
switch (reply) {
case IPC_GLB_REPLY_SUCCESS:
dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
break;
case IPC_GLB_REPLY_OUT_OF_MEMORY:
dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary);
msg->errno = -ENOMEM;
break;
case IPC_GLB_REPLY_BUSY:
dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
msg->errno = -EBUSY;
break;
default:
dev_err(ipc->dev, "Unknown ipc reply: 0x%x", reply);
msg->errno = -EINVAL;
break;
}
if (reply != IPC_GLB_REPLY_SUCCESS) {
dev_err(ipc->dev, "ipc FW reply: reply=%d", reply);
dev_err(ipc->dev, "FW Error Code: %u\n",
ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
}
list_del(&msg->list);
sst_ipc_tx_msg_reply_complete(ipc, msg);
}
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
{
struct sst_dsp *dsp = context;
struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
struct sst_generic_ipc *ipc = &skl->ipc;
struct skl_ipc_header header = {0};
u32 hipcie, hipct, hipcte;
int ipc_irq = 0;
if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
skl_cldma_process_intr(dsp);
/* Here we handle IPC interrupts only */
if (!(dsp->intr_status & SKL_ADSPIS_IPC))
return IRQ_NONE;
hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);
/* reply message from DSP */
if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_DONE, 0);
/* clear DONE bit - tell DSP we have completed the operation */
sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);
ipc_irq = 1;
/* unmask Done interrupt */
sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
}
/* New message from DSP */
if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
header.primary = hipct;
header.extension = hipcte;
dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
header.primary);
dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
header.extension);
if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
/* Handle Immediate reply from DSP Core */
skl_ipc_process_reply(ipc, header);
} else {
dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
skl_ipc_process_notification(ipc, header);
}
/* clear busy interrupt */
sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
ipc_irq = 1;
}
if (ipc_irq == 0)
return IRQ_NONE;
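/*
 * Re-enable IPC interrupts; they are expected to have been masked by the
 * hard-IRQ handler before this thread was woken.
 */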
skl_ipc_int_enable(dsp);
/* continue to send any remaining messages... */
queue_kthread_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
void skl_ipc_int_enable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_ADSPIC,
SKL_ADSPIC_IPC, SKL_ADSPIC_IPC);
}
void skl_ipc_int_disable(struct sst_dsp *ctx)
{
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
SKL_ADSPIC_IPC, 0);
}
void skl_ipc_op_int_enable(struct sst_dsp *ctx)
{
/* enable IPC DONE interrupt */
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
/* Enable IPC BUSY interrupt */
sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_BUSY, SKL_ADSP_REG_HIPCCTL_BUSY);
}
bool skl_ipc_int_status(struct sst_dsp *ctx)
{
return sst_dsp_shim_read_unlocked(ctx,
SKL_ADSP_REG_ADSPIS) & SKL_ADSPIS_IPC;
}
int skl_ipc_init(struct device *dev, struct skl_sst *skl)
{
struct sst_generic_ipc *ipc;
int err;
ipc = &skl->ipc;
ipc->dsp = skl->dsp;
ipc->dev = dev;
ipc->tx_data_max_size = SKL_ADSP_W1_SZ;
ipc->rx_data_max_size = SKL_ADSP_W0_UP_SZ;
err = sst_ipc_init(ipc);
if (err)
return err;
ipc->ops.tx_msg = skl_ipc_tx_msg;
ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
ipc->ops.is_dsp_busy = skl_ipc_is_dsp_busy;
return 0;
}
void skl_ipc_free(struct sst_generic_ipc *ipc)
{
/* Disable IPC DONE interrupt */
sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_DONE, 0);
/* Disable IPC BUSY interrupt */
sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
SKL_ADSP_REG_HIPCCTL_BUSY, 0);
sst_ipc_fini(ipc);
}
int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);
header.primary |= IPC_INSTANCE_ID(instance_id);
header.primary |= IPC_PPL_TYPE(ppl_type);
header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: create pipeline fail, err: %d\n", ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_create_pipeline);
int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_GLB_DELETE_PPL);
header.primary |= IPC_INSTANCE_ID(instance_id);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: delete pipeline failed, err %d\n", ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(skl_ipc_delete_pipeline);
int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
u8 instance_id, enum skl_ipc_pipeline_state state)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_GLB_SET_PPL_STATE);
header.primary |= IPC_INSTANCE_ID(instance_id);
header.primary |= IPC_PPL_STATE(state);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: set pipeline state failed, err: %d\n", ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_set_pipeline_state);
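/*
 * Rough usage sketch, not a verbatim sequence from this driver: a caller
 * is expected to create a pipeline, initialize and bind its module
 * instances, and only then run it, e.g.
 *
 *	skl_ipc_create_pipeline(ipc, ppl_mem_size, ppl_type, ppl_id);
 *	skl_ipc_init_instance(ipc, &init_msg, params);
 *	skl_ipc_bind_unbind(ipc, &bind_msg);
 *	skl_ipc_set_pipeline_state(ipc, ppl_id, PPL_RUNNING);
 *
 * with skl_ipc_set_pipeline_state(ipc, ppl_id, PPL_RESET) and
 * skl_ipc_delete_pipeline(ipc, ppl_id) on the teardown path. The variable
 * names above are placeholders.
 */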
int
skl_ipc_save_pipeline(struct sst_generic_ipc *ipc, u8 instance_id, int dma_id)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_GLB_SAVE_PPL);
header.primary |= IPC_INSTANCE_ID(instance_id);
header.extension = IPC_DMA_ID(dma_id);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: save pipeline failed, err: %d\n", ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_save_pipeline);
int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_GLB_RESTORE_PPL);
header.primary |= IPC_INSTANCE_ID(instance_id);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: restore pipeline failed, err: %d\n", ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_restore_pipeline);
int skl_ipc_set_dx(struct sst_generic_ipc *ipc, u8 instance_id,
u16 module_id, struct skl_ipc_dxstate_info *dx)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_DX);
header.primary |= IPC_MOD_INSTANCE_ID(instance_id);
header.primary |= IPC_MOD_ID(module_id);
dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
header.primary, header.extension);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
dx, sizeof(*dx), NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: set dx failed, err %d\n", ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_set_dx);
int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
struct skl_ipc_init_instance_msg *msg, void *param_data)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
u32 *buffer = (u32 *)param_data;
/* param_block_size must be in dwords */
u16 param_block_size = msg->param_data_size / sizeof(u32);
print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
16, 4, buffer, param_block_size, false);
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE);
header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
header.primary |= IPC_MOD_ID(msg->module_id);
header.extension = IPC_CORE_ID(msg->core_id);
header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
header.primary, header.extension);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data,
msg->param_data_size, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: init instance failed\n");
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_init_instance);
int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
struct skl_ipc_bind_unbind_msg *msg)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
u8 bind_unbind = msg->bind ? IPC_MOD_BIND : IPC_MOD_UNBIND;
int ret;
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(bind_unbind);
header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
header.primary |= IPC_MOD_ID(msg->module_id);
header.extension = IPC_DST_MOD_ID(msg->dst_module_id);
header.extension |= IPC_DST_MOD_INSTANCE_ID(msg->dst_instance_id);
header.extension |= IPC_DST_QUEUE(msg->dst_queue);
header.extension |= IPC_SRC_QUEUE(msg->src_queue);
dev_dbg(ipc->dev, "In %s hdr=%x ext=%x\n", __func__, header.primary,
header.extension);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev, "ipc: bind/unbind faileden");
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
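/*
 * Illustrative call only: sending a hypothetical configuration blob of
 * cfg_size bytes to module id 0x3, instance 0, under large_param_id 0
 * would look roughly like
 *
 *	struct skl_ipc_large_config_msg msg = {
 *		.module_id = 0x3,
 *		.instance_id = 0,
 *		.large_param_id = 0,
 *		.param_data_size = cfg_size,
 *	};
 *	ret = skl_ipc_set_large_config(ipc, &msg, (u32 *)cfg_blob);
 *
 * skl_ipc_set_large_config() below splits the payload into
 * SKL_ADSP_W1_SZ sized chunks itself.
 */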
int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg, u32 *param)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret = 0;
size_t sz_remaining, tx_size, data_offset;
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_MOD_LARGE_CONFIG_SET);
header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
header.primary |= IPC_MOD_ID(msg->module_id);
header.extension = IPC_DATA_OFFSET_SZ(msg->param_data_size);
header.extension |= IPC_LARGE_PARAM_ID(msg->large_param_id);
header.extension |= IPC_FINAL_BLOCK(0);
header.extension |= IPC_INITIAL_BLOCK(1);
sz_remaining = msg->param_data_size;
data_offset = 0;
while (sz_remaining != 0) {
tx_size = sz_remaining > SKL_ADSP_W1_SZ
? SKL_ADSP_W1_SZ : sz_remaining;
if (tx_size == sz_remaining)
header.extension |= IPC_FINAL_BLOCK(1);
dev_dbg(ipc->dev, "In %s primary=%#x ext=%#x\n", __func__,
header.primary, header.extension);
dev_dbg(ipc->dev, "transmitting offset: %#x, size: %#x\n",
(unsigned)data_offset, (unsigned)tx_size);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
((char *)param) + data_offset,
tx_size, NULL, 0);
if (ret < 0) {
dev_err(ipc->dev,
"ipc: set large config fail, err: %d\n", ret);
return ret;
}
sz_remaining -= tx_size;
data_offset = msg->param_data_size - sz_remaining;
/* clear the fields */
header.extension &= IPC_INITIAL_BLOCK_CLEAR;
header.extension &= IPC_DATA_OFFSET_SZ_CLEAR;
/* fill the fields */
header.extension |= IPC_INITIAL_BLOCK(0);
header.extension |= IPC_DATA_OFFSET_SZ(data_offset);
}
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_set_large_config);


@ -0,0 +1,125 @@
/*
* Intel SKL IPC Support
*
* Copyright (C) 2014-15, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __SKL_IPC_H
#define __SKL_IPC_H
#include <linux/kthread.h>
#include <linux/irqreturn.h>
#include "../common/sst-ipc.h"
struct sst_dsp;
struct skl_sst;
struct sst_generic_ipc;
enum skl_ipc_pipeline_state {
PPL_INVALID_STATE = 0,
PPL_UNINITIALIZED = 1,
PPL_RESET = 2,
PPL_PAUSED = 3,
PPL_RUNNING = 4,
PPL_ERROR_STOP = 5,
PPL_SAVED = 6,
PPL_RESTORED = 7
};
struct skl_ipc_dxstate_info {
u32 core_mask;
u32 dx_mask;
};
struct skl_ipc_header {
u32 primary;
u32 extension;
};
struct skl_sst {
struct device *dev;
struct sst_dsp *dsp;
/* boot */
wait_queue_head_t boot_wait;
bool boot_complete;
/* IPC messaging */
struct sst_generic_ipc ipc;
};
struct skl_ipc_init_instance_msg {
u32 module_id;
u32 instance_id;
u16 param_data_size;
u8 ppl_instance_id;
u8 core_id;
};
struct skl_ipc_bind_unbind_msg {
u32 module_id;
u32 instance_id;
u32 dst_module_id;
u32 dst_instance_id;
u8 src_queue;
u8 dst_queue;
bool bind;
};
struct skl_ipc_large_config_msg {
u32 module_id;
u32 instance_id;
u32 large_param_id;
u32 param_data_size;
};
#define SKL_IPC_BOOT_MSECS 3000
#define SKL_IPC_D3_MASK 0
#define SKL_IPC_D0_MASK 3
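/*
 * Sketch of how a caller is expected to fill a bind request (ids and queue
 * numbers below are placeholders, not values from real topology data):
 *
 *	struct skl_ipc_bind_unbind_msg msg = {
 *		.module_id	 = src_mod_id,
 *		.instance_id	 = 0,
 *		.dst_module_id	 = dst_mod_id,
 *		.dst_instance_id = 0,
 *		.src_queue	 = 0,
 *		.dst_queue	 = 0,
 *		.bind		 = true,
 *	};
 *	ret = skl_ipc_bind_unbind(ipc, &msg);
 */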
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
int skl_ipc_set_pipeline_state(struct sst_generic_ipc *sst_ipc,
u8 instance_id, enum skl_ipc_pipeline_state state);
int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
u8 instance_id, int dma_id);
int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
int skl_ipc_init_instance(struct sst_generic_ipc *sst_ipc,
struct skl_ipc_init_instance_msg *msg, void *param_data);
int skl_ipc_bind_unbind(struct sst_generic_ipc *sst_ipc,
struct skl_ipc_bind_unbind_msg *msg);
int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
struct skl_ipc_large_config_msg *msg, u32 *param);
void skl_ipc_int_enable(struct sst_dsp *dsp);
void skl_ipc_op_int_enable(struct sst_dsp *ctx);
void skl_ipc_int_disable(struct sst_dsp *dsp);
bool skl_ipc_int_status(struct sst_dsp *dsp);
void skl_ipc_free(struct sst_generic_ipc *ipc);
int skl_ipc_init(struct device *dev, struct skl_sst *skl);
#endif /* __SKL_IPC_H */


@ -0,0 +1,280 @@
/*
* skl-sst.c - HDA DSP library functions for SKL platform
*
* Copyright (C) 2014-15, Intel Corporation.
* Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
* Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl-sst-ipc.h"
#define SKL_BASEFW_TIMEOUT 300
#define SKL_INIT_TIMEOUT 1000
/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE 0x8000
/* Firmware status window */
#define SKL_ADSP_FW_STATUS SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE (SKL_ADSP_FW_STATUS + 0x4)
#define SKL_INSTANCE_ID 0
#define SKL_BASE_FW_MODULE_ID 0
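/*
 * The base firmware itself is addressed as module 0, instance 0; these are
 * used below when sending the SET_DX request that moves the DSP to D3.
 */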
static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
u32 cur_sts;
cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
return (cur_sts == status);
}
static int skl_transfer_firmware(struct sst_dsp *ctx,
const void *basefw, u32 base_fw_size)
{
int ret = 0;
ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
if (ret < 0)
return ret;
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_FW_STATUS,
SKL_FW_STS_MASK,
SKL_FW_RFW_START,
SKL_BASEFW_TIMEOUT,
"Firmware boot");
ctx->cl_dev.ops.cl_stop_dma(ctx);
return ret;
}
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
int ret = 0, i;
const struct firmware *fw = NULL;
struct skl_sst *skl = ctx->thread_context;
u32 reg;
ret = request_firmware(&fw, "dsp_fw_release.bin", ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
skl_dsp_disable_core(ctx);
return -EIO;
}
/* enable Interrupt */
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
/* check ROM Status */
for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
dev_dbg(ctx->dev,
"ROM loaded, we can continue with FW loading\n");
break;
}
mdelay(1);
}
if (!i) {
reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
dev_err(ctx->dev,
"Timeout waiting for ROM init done, reg:0x%x\n", reg);
ret = -EIO;
goto skl_load_base_firmware_failed;
}
ret = skl_transfer_firmware(ctx, fw->data, fw->size);
if (ret < 0) {
dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
goto skl_load_base_firmware_failed;
} else {
ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
ret = -EIO;
goto skl_load_base_firmware_failed;
}
dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
}
release_firmware(fw);
return 0;
skl_load_base_firmware_failed:
skl_dsp_disable_core(ctx);
release_firmware(fw);
return ret;
}
static int skl_set_dsp_D0(struct sst_dsp *ctx)
{
int ret;
ret = skl_load_base_firmware(ctx);
if (ret < 0) {
dev_err(ctx->dev, "unable to load firmware\n");
return ret;
}
skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
return ret;
}
static int skl_set_dsp_D3(struct sst_dsp *ctx)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_sst *skl = ctx->thread_context;
dev_dbg(ctx->dev, "In %s:\n", __func__);
mutex_lock(&ctx->mutex);
if (!is_skl_dsp_running(ctx)) {
mutex_unlock(&ctx->mutex);
return 0;
}
mutex_unlock(&ctx->mutex);
dx.core_mask = SKL_DSP_CORE0_MASK;
dx.dx_mask = SKL_IPC_D3_MASK;
ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D3 state\n");
return ret;
}
ret = skl_dsp_disable_core(ctx);
if (ret < 0) {
dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
ret = -EIO;
}
skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
return ret;
}
static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}
static struct skl_dsp_fw_ops skl_fw_ops = {
.set_state_D0 = skl_set_dsp_D0,
.set_state_D3 = skl_set_dsp_D3,
.load_fw = skl_load_base_firmware,
.get_fw_errcode = skl_get_errorcode,
};
static struct sst_ops skl_ops = {
.irq_handler = skl_dsp_sst_interrupt,
.write = sst_shim32_write,
.read = sst_shim32_read,
.ram_read = sst_memcpy_fromio_32,
.ram_write = sst_memcpy_toio_32,
.free = skl_dsp_free,
};
static struct sst_dsp_device skl_dev = {
.thread = skl_dsp_irq_thread_handler,
.ops = &skl_ops,
};
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
{
struct skl_sst *skl;
struct sst_dsp *sst;
int ret;
skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
if (skl == NULL)
return -ENOMEM;
skl->dev = dev;
skl_dev.thread_context = skl;
skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
if (!skl->dsp) {
dev_err(skl->dev, "%s: no device\n", __func__);
return -ENODEV;
}
sst = skl->dsp;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
sst->dsp_ops = dsp_ops;
sst->fw_ops = skl_fw_ops;
ret = skl_ipc_init(dev, skl);
if (ret)
return ret;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
ret = skl_dsp_boot(sst);
if (ret < 0) {
dev_err(skl->dev, "Boot dsp core failed ret: %d", ret);
goto free_ipc;
}
ret = skl_cldma_prepare(sst);
if (ret < 0) {
dev_err(dev, "CL dma prepare failed : %d", ret);
goto free_ipc;
}
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "Load base fw failed : %d", ret);
goto free_ipc;
}
if (dsp)
*dsp = skl;
return 0;
free_ipc:
skl_ipc_free(&skl->ipc);
return ret;
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
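/*
 * Rough usage sketch (the surrounding platform code is assumed, not shown
 * here): the Skylake platform driver is expected to hand in its DSP MMIO
 * base, IRQ and code-loader ops, and to tear the context down again with
 * the cleanup helper below:
 *
 *	struct skl_sst *ctx;
 *	ret = skl_sst_dsp_init(dev, mmio_base, irq, loader_ops, &ctx);
 *	...
 *	skl_sst_dsp_cleanup(dev, ctx);
 */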
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
skl_ipc_free(&ctx->ipc);
ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
ctx->dsp->ops->free(ctx->dsp);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");


@ -0,0 +1,286 @@
/*
* skl_topology.h - Intel HDA Platform topology header file
*
* Copyright (C) 2014-15 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#ifndef __SKL_TOPOLOGY_H__
#define __SKL_TOPOLOGY_H__
#include <linux/types.h>
#include <sound/hdaudio_ext.h>
#include <sound/soc.h>
#include "skl.h"
#include "skl-tplg-interface.h"
#define BITS_PER_BYTE 8
#define MAX_TS_GROUPS 8
#define MAX_DMIC_TS_GROUPS 4
#define MAX_FIXED_DMIC_PARAMS_SIZE 727
/* Maximum number of coefficients up down mixer module */
#define UP_DOWN_MIXER_MAX_COEFF 6
enum skl_channel_index {
SKL_CHANNEL_LEFT = 0,
SKL_CHANNEL_RIGHT = 1,
SKL_CHANNEL_CENTER = 2,
SKL_CHANNEL_LEFT_SURROUND = 3,
SKL_CHANNEL_CENTER_SURROUND = 3,
SKL_CHANNEL_RIGHT_SURROUND = 4,
SKL_CHANNEL_LFE = 7,
SKL_CHANNEL_INVALID = 0xF,
};
enum skl_bitdepth {
SKL_DEPTH_8BIT = 8,
SKL_DEPTH_16BIT = 16,
SKL_DEPTH_24BIT = 24,
SKL_DEPTH_32BIT = 32,
SKL_DEPTH_INVALID
};
enum skl_interleaving {
/* [s1_ch1...s1_chN,...,sM_ch1...sM_chN] */
SKL_INTERLEAVING_PER_CHANNEL = 0,
/* [s1_ch1...sM_ch1,...,s1_chN...sM_chN] */
SKL_INTERLEAVING_PER_SAMPLE = 1,
};
enum skl_s_freq {
SKL_FS_8000 = 8000,
SKL_FS_11025 = 11025,
SKL_FS_12000 = 12000,
SKL_FS_16000 = 16000,
SKL_FS_22050 = 22050,
SKL_FS_24000 = 24000,
SKL_FS_32000 = 32000,
SKL_FS_44100 = 44100,
SKL_FS_48000 = 48000,
SKL_FS_64000 = 64000,
SKL_FS_88200 = 88200,
SKL_FS_96000 = 96000,
SKL_FS_128000 = 128000,
SKL_FS_176400 = 176400,
SKL_FS_192000 = 192000,
SKL_FS_INVALID
};
enum skl_widget_type {
SKL_WIDGET_VMIXER = 1,
SKL_WIDGET_MIXER = 2,
SKL_WIDGET_PGA = 3,
SKL_WIDGET_MUX = 4
};
struct skl_audio_data_format {
enum skl_s_freq s_freq;
enum skl_bitdepth bit_depth;
u32 channel_map;
enum skl_ch_cfg ch_cfg;
enum skl_interleaving interleaving;
u8 number_of_channels;
u8 valid_bit_depth;
u8 sample_type;
u8 reserved[1];
} __packed;
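/*
 * Common parameters passed to every module at init time: cps is understood
 * to be the MCPS (cycles) budget, ibs/obs the input/output buffer sizes in
 * bytes, and is_pages the number of memory pages for the instance.
 */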
struct skl_base_cfg {
u32 cps;
u32 ibs;
u32 obs;
u32 is_pages;
struct skl_audio_data_format audio_fmt;
};
struct skl_cpr_gtw_cfg {
u32 node_id;
u32 dma_buffer_size;
u32 config_length;
/* not mandatory; required only for DMIC/I2S */
u32 config_data[1];
} __packed;
struct skl_cpr_cfg {
struct skl_base_cfg base_cfg;
struct skl_audio_data_format out_fmt;
u32 cpr_feature_mask;
struct skl_cpr_gtw_cfg gtw_cfg;
} __packed;
struct skl_src_module_cfg {
struct skl_base_cfg base_cfg;
enum skl_s_freq src_cfg;
} __packed;
struct skl_up_down_mixer_cfg {
struct skl_base_cfg base_cfg;
enum skl_ch_cfg out_ch_cfg;
/* This should be set to 1 if user coefficients are required */
u32 coeff_sel;
/* Pass the user coeff in this array */
s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
} __packed;
enum skl_dma_type {
SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
};
union skl_ssp_dma_node {
u8 val;
struct {
u8 dual_mono:1;
u8 time_slot:3;
u8 i2s_instance:4;
} dma_node;
};
union skl_connector_node_id {
u32 val;
struct {
u32 vindex:8;
u32 dma_type:4;
u32 rsvd:20;
} node;
};
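/*
 * The copier gateway node_id in skl_cpr_gtw_cfg above is understood to be
 * built from this union: the DMA/link class goes into dma_type and the
 * stream or instance index into vindex.
 */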
struct skl_module_fmt {
u32 channels;
u32 s_freq;
u32 bit_depth;
u32 valid_bit_depth;
u32 ch_cfg;
};
struct skl_module_inst_id {
u32 module_id;
u32 instance_id;
};
struct skl_module_pin {
struct skl_module_inst_id id;
u8 pin_index;
bool is_dynamic;
bool in_use;
};
struct skl_specific_cfg {
u32 caps_size;
u32 *caps;
};
enum skl_pipe_state {
SKL_PIPE_INVALID = 0,
SKL_PIPE_CREATED = 1,
SKL_PIPE_PAUSED = 2,
SKL_PIPE_STARTED = 3
};
struct skl_pipe_module {
struct snd_soc_dapm_widget *w;
struct list_head node;
};
struct skl_pipe_params {
u8 host_dma_id;
u8 link_dma_id;
u32 ch;
u32 s_freq;
u32 s_fmt;
u8 linktype;
int stream;
};
struct skl_pipe {
u8 ppl_id;
u8 pipe_priority;
u16 conn_type;
u32 memory_pages;
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
struct list_head w_list;
};
enum skl_module_state {
SKL_MODULE_UNINIT = 0,
SKL_MODULE_INIT_DONE = 1,
SKL_MODULE_LOADED = 2,
SKL_MODULE_UNLOADED = 3,
SKL_MODULE_BIND_DONE = 4
};
struct skl_module_cfg {
struct skl_module_inst_id id;
struct skl_module_fmt in_fmt;
struct skl_module_fmt out_fmt;
u8 max_in_queue;
u8 max_out_queue;
u8 in_queue_mask;
u8 out_queue_mask;
u8 in_queue;
u8 out_queue;
u32 mcps;
u32 ibs;
u32 obs;
u8 is_loadable;
u8 core_id;
u8 dev_type;
u8 dma_id;
u8 time_slot;
u32 params_fixup;
u32 converter;
u32 vbus_id;
struct skl_module_pin *m_in_pin;
struct skl_module_pin *m_out_pin;
enum skl_module_type m_type;
enum skl_hw_conn_type hw_conn_type;
enum skl_module_state m_state;
struct skl_pipe *pipe;
struct skl_specific_cfg formats_config;
};
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_pause_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config,
char *param);
int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
*src_module, struct skl_module_cfg *dst_module);
int skl_unbind_modules(struct skl_sst *ctx, struct skl_module_cfg
*src_module, struct skl_module_cfg *dst_module);
enum skl_bitdepth skl_get_bit_depth(int params);
#endif


@ -0,0 +1,88 @@
/*
* skl-tplg-interface.h - Intel DSP FW private data interface
*
* Copyright (C) 2015 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
* Nilofer, Samreen <samreen.nilofer@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef __HDA_TPLG_INTERFACE_H__
#define __HDA_TPLG_INTERFACE_H__
/**
* enum skl_ch_cfg - channel configuration
*
* @SKL_CH_CFG_MONO: One channel only
* @SKL_CH_CFG_STEREO: L & R
* @SKL_CH_CFG_2_1: L, R & LFE
* @SKL_CH_CFG_3_0: L, C & R
* @SKL_CH_CFG_3_1: L, C, R & LFE
* @SKL_CH_CFG_QUATRO: L, R, Ls & Rs
* @SKL_CH_CFG_4_0: L, C, R & Cs
* @SKL_CH_CFG_5_0: L, C, R, Ls & Rs
* @SKL_CH_CFG_5_1: L, C, R, Ls, Rs & LFE
* @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
* @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
* @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
* @SKL_CH_CFG_INVALID: Invalid
*/
enum skl_ch_cfg {
SKL_CH_CFG_MONO = 0,
SKL_CH_CFG_STEREO = 1,
SKL_CH_CFG_2_1 = 2,
SKL_CH_CFG_3_0 = 3,
SKL_CH_CFG_3_1 = 4,
SKL_CH_CFG_QUATRO = 5,
SKL_CH_CFG_4_0 = 6,
SKL_CH_CFG_5_0 = 7,
SKL_CH_CFG_5_1 = 8,
SKL_CH_CFG_DUAL_MONO = 9,
SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
SKL_CH_CFG_INVALID
};
enum skl_module_type {
SKL_MODULE_TYPE_MIXER = 0,
SKL_MODULE_TYPE_COPIER,
SKL_MODULE_TYPE_UPDWMIX,
SKL_MODULE_TYPE_SRCINT
};
enum skl_core_affinity {
SKL_AFFINITY_CORE_0 = 0,
SKL_AFFINITY_CORE_1,
SKL_AFFINITY_CORE_MAX
};
enum skl_pipe_conn_type {
SKL_PIPE_CONN_TYPE_NONE = 0,
SKL_PIPE_CONN_TYPE_FE,
SKL_PIPE_CONN_TYPE_BE
};
enum skl_hw_conn_type {
SKL_CONN_NONE = 0,
SKL_CONN_SOURCE = 1,
SKL_CONN_SINK = 2
};
enum skl_dev_type {
SKL_DEVICE_BT = 0x0,
SKL_DEVICE_DMIC = 0x1,
SKL_DEVICE_I2S = 0x2,
SKL_DEVICE_SLIMBUS = 0x3,
SKL_DEVICE_HDALINK = 0x4,
SKL_DEVICE_NONE
};
#endif


@ -0,0 +1,536 @@
/*
* skl.c - Implementation of ASoC Intel SKL HD Audio driver
*
* Copyright (C) 2014-2015 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
*
* Derived mostly from Intel HDA driver with following copyrights:
* Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
* PeiSen Hou <pshou@realtek.com.tw>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <sound/pcm.h>
#include "skl.h"
/*
* initialize the PCI registers
*/
static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
unsigned char mask, unsigned char val)
{
unsigned char data;
pci_read_config_byte(pci, reg, &data);
data &= ~mask;
data |= (val & mask);
pci_write_config_byte(pci, reg, data);
}
static void skl_init_pci(struct skl *skl)
{
struct hdac_ext_bus *ebus = &skl->ebus;
/*
* Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
* TCSEL == Traffic Class Select Register, which sets PCI express QOS
* Ensuring these bits are 0 clears playback static on some HD Audio
* codecs.
* The PCI register TCSEL is defined in the Intel manuals.
*/
dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
}
/* called from IRQ */
static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
{
snd_pcm_period_elapsed(hstr->substream);
}
static irqreturn_t skl_interrupt(int irq, void *dev_id)
{
struct hdac_ext_bus *ebus = dev_id;
struct hdac_bus *bus = ebus_to_hbus(ebus);
u32 status;
if (!pm_runtime_active(bus->dev))
return IRQ_NONE;
spin_lock(&bus->reg_lock);
status = snd_hdac_chip_readl(bus, INTSTS);
if (status == 0 || status == 0xffffffff) {
spin_unlock(&bus->reg_lock);
return IRQ_NONE;
}
/* clear rirb int */
status = snd_hdac_chip_readb(bus, RIRBSTS);
if (status & RIRB_INT_MASK) {
if (status & RIRB_INT_RESPONSE)
snd_hdac_bus_update_rirb(bus);
snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
}
spin_unlock(&bus->reg_lock);
return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
{
struct hdac_ext_bus *ebus = dev_id;
struct hdac_bus *bus = ebus_to_hbus(ebus);
u32 status;
status = snd_hdac_chip_readl(bus, INTSTS);
snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);
return IRQ_HANDLED;
}
static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
{
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
int ret;
ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
skl_threaded_handler,
IRQF_SHARED,
KBUILD_MODNAME, ebus);
if (ret) {
dev_err(bus->dev,
"unable to grab IRQ %d, disabling device\n",
skl->pci->irq);
return ret;
}
bus->irq = skl->pci->irq;
pci_intx(skl->pci, 1);
return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
static int skl_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct hdac_bus *bus = ebus_to_hbus(ebus);
snd_hdac_bus_stop_chip(bus);
snd_hdac_bus_enter_link_reset(bus);
return 0;
}
static int skl_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl *hda = ebus_to_skl(ebus);
skl_init_pci(hda);
snd_hdac_bus_init_chip(bus, 1);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int skl_runtime_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct hdac_bus *bus = ebus_to_hbus(ebus);
dev_dbg(bus->dev, "in %s\n", __func__);
/* enable controller wake up event */
snd_hdac_chip_updatew(bus, WAKEEN, 0, STATESTS_INT_MASK);
snd_hdac_bus_stop_chip(bus);
snd_hdac_bus_enter_link_reset(bus);
return 0;
}
static int skl_runtime_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl *hda = ebus_to_skl(ebus);
int status;
dev_dbg(bus->dev, "in %s\n", __func__);
/* Read STATESTS before controller reset */
status = snd_hdac_chip_readw(bus, STATESTS);
skl_init_pci(hda);
snd_hdac_bus_init_chip(bus, true);
/* disable controller Wake Up event */
snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops skl_pm = {
SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
};
/*
* destructor
*/
static int skl_free(struct hdac_ext_bus *ebus)
{
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
skl->init_failed = 1; /* to be sure */
snd_hdac_ext_stop_streams(ebus);
if (bus->irq >= 0)
free_irq(bus->irq, (void *)ebus);
if (bus->remap_addr)
iounmap(bus->remap_addr);
snd_hdac_bus_free_stream_pages(bus);
snd_hdac_stream_free_all(ebus);
snd_hdac_link_free_all(ebus);
pci_release_regions(skl->pci);
pci_disable_device(skl->pci);
snd_hdac_ext_bus_exit(ebus);
return 0;
}
static int skl_dmic_device_register(struct skl *skl)
{
struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
struct platform_device *pdev;
int ret;
/* SKL has one dmic port, so allocate dmic device for this */
pdev = platform_device_alloc("dmic-codec", -1);
if (!pdev) {
dev_err(bus->dev, "failed to allocate dmic device\n");
return -ENOMEM;
}
ret = platform_device_add(pdev);
if (ret) {
dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
platform_device_put(pdev);
return ret;
}
skl->dmic_dev = pdev;
return 0;
}
static void skl_dmic_device_unregister(struct skl *skl)
{
if (skl->dmic_dev)
platform_device_unregister(skl->dmic_dev);
}
/*
* Probe the given codec address
*/
static int probe_codec(struct hdac_ext_bus *ebus, int addr)
{
struct hdac_bus *bus = ebus_to_hbus(ebus);
unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
snd_hdac_bus_get_response(bus, addr, &res);
mutex_unlock(&bus->cmd_mutex);
if (res == -1)
return -EIO;
dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
return snd_hdac_ext_bus_device_init(ebus, addr);
}
/* Codec initialization */
static int skl_codec_create(struct hdac_ext_bus *ebus)
{
struct hdac_bus *bus = ebus_to_hbus(ebus);
int c, max_slots;
max_slots = HDA_MAX_CODECS;
/* First try to probe all given codec slots */
for (c = 0; c < max_slots; c++) {
if ((bus->codec_mask & (1 << c))) {
if (probe_codec(ebus, c) < 0) {
/*
* Some BIOSen give you wrong codec addresses
* that don't exist
*/
dev_warn(bus->dev,
"Codec #%d probe error; disabling it...\n", c);
bus->codec_mask &= ~(1 << c);
/*
* Worse, accessing a non-existent codec often
* screws up the controller bus and disturbs
* further communication. So if an error occurs
* during probing, reset the controller bus to
* get back to a sane state.
*/
snd_hdac_bus_stop_chip(bus);
snd_hdac_bus_init_chip(bus, true);
}
}
}
return 0;
}
static const struct hdac_bus_ops bus_core_ops = {
.command = snd_hdac_bus_send_cmd,
.get_response = snd_hdac_bus_get_response,
};
/*
* constructor
*/
static int skl_create(struct pci_dev *pci,
const struct hdac_io_ops *io_ops,
struct skl **rskl)
{
struct skl *skl;
struct hdac_ext_bus *ebus;
int err;
*rskl = NULL;
err = pci_enable_device(pci);
if (err < 0)
return err;
skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
if (!skl) {
pci_disable_device(pci);
return -ENOMEM;
}
ebus = &skl->ebus;
snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
ebus->bus.use_posbuf = 1;
skl->pci = pci;
ebus->bus.bdl_pos_adj = 0;
*rskl = skl;
return 0;
}
static int skl_first_init(struct hdac_ext_bus *ebus)
{
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct pci_dev *pci = skl->pci;
int err;
unsigned short gcap;
int cp_streams, pb_streams, start_idx;
err = pci_request_regions(pci, "Skylake HD audio");
if (err < 0)
return err;
bus->addr = pci_resource_start(pci, 0);
bus->remap_addr = pci_ioremap_bar(pci, 0);
if (bus->remap_addr == NULL) {
dev_err(bus->dev, "ioremap error\n");
return -ENXIO;
}
snd_hdac_ext_bus_parse_capabilities(ebus);
if (skl_acquire_irq(ebus, 0) < 0)
return -EBUSY;
pci_set_master(pci);
synchronize_irq(bus->irq);
gcap = snd_hdac_chip_readw(bus, GCAP);
dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
/* allow 64bit DMA address if supported by H/W */
if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
} else {
dma_set_mask(bus->dev, DMA_BIT_MASK(32));
dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
}
/* read number of streams from GCAP register */
cp_streams = (gcap >> 8) & 0x0f;
pb_streams = (gcap >> 12) & 0x0f;
if (!pb_streams && !cp_streams)
return -EIO;
ebus->num_streams = cp_streams + pb_streams;
/* initialize streams */
snd_hdac_ext_stream_init_all
(ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
start_idx = cp_streams;
snd_hdac_ext_stream_init_all
(ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
err = snd_hdac_bus_alloc_stream_pages(bus);
if (err < 0)
return err;
/* initialize chip */
skl_init_pci(skl);
snd_hdac_bus_init_chip(bus, true);
/* codec detection */
if (!bus->codec_mask) {
dev_err(bus->dev, "no codecs found!\n");
return -ENODEV;
}
return 0;
}
static int skl_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct skl *skl;
struct hdac_ext_bus *ebus = NULL;
struct hdac_bus *bus = NULL;
int err;
/* we use ext core ops, so provide NULL for ops here */
err = skl_create(pci, NULL, &skl);
if (err < 0)
return err;
ebus = &skl->ebus;
bus = ebus_to_hbus(ebus);
err = skl_first_init(ebus);
if (err < 0)
goto out_free;
pci_set_drvdata(skl->pci, ebus);
/* check if dsp is there */
if (ebus->ppcap) {
/* TODO register with dsp IPC */
dev_dbg(bus->dev, "Register dsp\n");
}
if (ebus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(ebus);
/* create device for soc dmic */
err = skl_dmic_device_register(skl);
if (err < 0)
goto out_free;
/* register platform dai and controls */
err = skl_platform_register(bus->dev);
if (err < 0)
goto out_dmic_free;
/* create codec instances */
err = skl_codec_create(ebus);
if (err < 0)
goto out_unregister;
/* configure PM */
pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
pm_runtime_use_autosuspend(bus->dev);
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
return 0;
out_unregister:
skl_platform_unregister(bus->dev);
out_dmic_free:
skl_dmic_device_unregister(skl);
out_free:
skl->init_failed = 1;
skl_free(ebus);
return err;
}
static void skl_remove(struct pci_dev *pci)
{
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
pci_dev_put(pci);
skl_platform_unregister(&pci->dev);
skl_dmic_device_unregister(skl);
skl_free(ebus);
dev_set_drvdata(&pci->dev, NULL);
}
/* PCI IDs */
static const struct pci_device_id skl_ids[] = {
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70), 0},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);
/* pci_driver definition */
static struct pci_driver skl_driver = {
.name = KBUILD_MODNAME,
.id_table = skl_ids,
.probe = skl_probe,
.remove = skl_remove,
.driver = {
.pm = &skl_pm,
},
};
module_pci_driver(skl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");


@ -0,0 +1,84 @@
/*
* skl.h - HD Audio Skylake definitions.
*
* Copyright (C) 2015 Intel Corp
* Author: Jeeja KP <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#ifndef __SOUND_SOC_SKL_H
#define __SOUND_SOC_SKL_H
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include "skl-nhlt.h"
#define SKL_SUSPEND_DELAY 2000
/* Vendor Specific Registers */
#define AZX_REG_VS_EM1 0x1000
#define AZX_REG_VS_INRC 0x1004
#define AZX_REG_VS_OUTRC 0x1008
#define AZX_REG_VS_FIFOTRK 0x100C
#define AZX_REG_VS_FIFOTRK2 0x1010
#define AZX_REG_VS_EM2 0x1030
#define AZX_REG_VS_EM3L 0x1038
#define AZX_REG_VS_EM3U 0x103C
#define AZX_REG_VS_EM4L 0x1040
#define AZX_REG_VS_EM4U 0x1044
#define AZX_REG_VS_LTRC 0x1048
#define AZX_REG_VS_D0I3C 0x104A
#define AZX_REG_VS_PCE 0x104B
#define AZX_REG_VS_L2MAGC 0x1050
#define AZX_REG_VS_L2LAHPT 0x1054
#define AZX_REG_VS_SDXDPIB_XBASE 0x1084
#define AZX_REG_VS_SDXDPIB_XINTERVAL 0x20
#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
struct skl {
struct hdac_ext_bus ebus;
struct pci_dev *pci;
unsigned int init_failed:1; /* delayed init failed */
struct platform_device *dmic_dev;
void __iomem *nhlt; /* nhlt ptr */
struct skl_sst *skl_sst; /* sst skl ctx */
};
#define skl_to_ebus(s) (&(s)->ebus)
#define ebus_to_skl(sbus) \
container_of(sbus, struct skl, ebus)
/* to pass dai dma data */
struct skl_dma_params {
u32 format;
u8 stream_tag;
};
int skl_platform_unregister(struct device *dev);
int skl_platform_register(struct device *dev);
void __iomem *skl_nhlt_init(struct device *dev);
void skl_nhlt_free(void __iomem *addr);
struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
int skl_init_dsp(struct skl *skl);
void skl_free_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
#endif /* __SOUND_SOC_SKL_H */


@ -148,10 +148,14 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
dram = mv_mbus_dram_info();
addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (priv->substream_play)
return -EBUSY;
priv->substream_play = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_PLAYBACK_WIN, addr, dram);
} else {
if (priv->substream_rec)
return -EBUSY;
priv->substream_rec = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_RECORD_WIN, addr, dram);