Merge remote-tracking branch 'asoc/topic/intel' into asoc-next

Mark Brown 2016-12-12 15:52:46 +00:00
commit 52708d05ba
30 changed files with 751 additions and 99 deletions

View File

@ -0,0 +1,17 @@
What: /sys/devices/platform/8086%x:00/firmware_version
Date: November 2016
KernelVersion: 4.10
Contact: "Sebastien Guiriec" <sebastien.guiriec@intel.com>
Description:
LPE Firmware version for the SST driver on all atom
platforms (BYT/CHT/Merrifield/BSW).
If the FW has never been loaded it will display:
"FW not yet loaded"
If FW has been loaded it will display:
"v01.aa.bb.cc"
aa: Major version, reflecting the SoC:
0d: BYT FW
0b: BSW FW
07: Merrifield FW
bb: Minor version
cc: Build version
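
For illustration only, a minimal userspace sketch that reads and parses this attribute; the ACPI device name in the path ("80860F28:00") is an assumption and differs per SoC, following the 8086%x:00 pattern above:

#include <stdio.h>

int main(void)
{
	/* path is an assumption; the "80860F28:00" part is platform specific */
	const char *path =
		"/sys/devices/platform/80860F28:00/firmware_version";
	char buf[32];
	unsigned int type, major, minor, build;
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);

	if (sscanf(buf, "v%2x.%2x.%2x.%2x", &type, &major, &minor, &build) == 4)
		/* major: 0x0d = BYT, 0x0b = BSW, 0x07 = Merrifield */
		printf("type %02x major %02x minor %02x build %02x\n",
		       type, major, minor, build);
	else
		printf("%s", buf);	/* "FW not yet loaded" */
	return 0;
}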

View File

@ -157,6 +157,10 @@
*
* %SKL_TKN_STR_LIB_NAME: Specifies the library name
*
* %SKL_TKN_U32_PMODE: Specifies the power mode for pipe
*
* %SKL_TKL_U32_D0I3_CAPS: Specifies the D0i3 capability for module
*
* module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*/
@ -208,7 +212,9 @@ enum SKL_TKNS {
SKL_TKN_U32_PROC_DOMAIN,
SKL_TKN_U32_LIB_COUNT,
SKL_TKN_STR_LIB_NAME,
SKL_TKN_MAX = SKL_TKN_STR_LIB_NAME,
SKL_TKN_U32_PMODE,
SKL_TKL_U32_D0I3_CAPS,
SKL_TKN_MAX = SKL_TKL_U32_D0I3_CAPS,
};
#endif

View File

@ -937,7 +937,7 @@ int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable)
struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
int ssp_id;
dev_info(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
dev_dbg(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
if (strcmp(id, "ssp0-port") == 0)
ssp_id = SSP_MODEM;

View File

@ -27,6 +27,7 @@
#include <linux/pm_qos.h>
#include <linux/async.h>
#include <linux/acpi.h>
#include <linux/sysfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
@ -242,6 +243,32 @@ int sst_alloc_drv_context(struct intel_sst_drv **ctx,
}
EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 &&
ctx->fw_version.minor == 0 && ctx->fw_version.build == 0)
return sprintf(buf, "FW not yet loaded\n");
else
return sprintf(buf, "v%02x.%02x.%02x.%02x\n",
ctx->fw_version.type, ctx->fw_version.major,
ctx->fw_version.minor, ctx->fw_version.build);
}
DEVICE_ATTR_RO(firmware_version);
static const struct attribute *sst_fw_version_attrs[] = {
&dev_attr_firmware_version.attr,
NULL,
};
static const struct attribute_group sst_fw_version_attr_group = {
.attrs = (struct attribute **)sst_fw_version_attrs,
};
int sst_context_init(struct intel_sst_drv *ctx)
{
int ret = 0, i;
@ -315,8 +342,19 @@ int sst_context_init(struct intel_sst_drv *ctx)
dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
goto do_free_mem;
}
ret = sysfs_create_group(&ctx->dev->kobj,
&sst_fw_version_attr_group);
if (ret) {
dev_err(ctx->dev,
"Unable to create sysfs\n");
goto err_sysfs;
}
sst_register(ctx->dev);
return 0;
err_sysfs:
sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
do_free_mem:
destroy_workqueue(ctx->post_msg_wq);
@ -330,6 +368,7 @@ void sst_context_cleanup(struct intel_sst_drv *ctx)
pm_runtime_disable(ctx->dev);
sst_unregister(ctx->dev);
sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
flush_scheduled_work();
destroy_workqueue(ctx->post_msg_wq);
pm_qos_remove_request(ctx->qos);

View File

@ -436,6 +436,7 @@ struct intel_sst_drv {
*/
char firmware_name[FW_NAME_SIZE];
struct snd_sst_fw_version fw_version;
struct sst_fw_save *fw_save;
};

View File

@ -236,6 +236,17 @@ static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
retval = init->result;
goto ret;
}
dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
init->fw_version.type, init->fw_version.major,
init->fw_version.minor, init->fw_version.build);
dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
init->build_info.date, init->build_info.time);
/* Save FW version */
sst_drv_ctx->fw_version.type = init->fw_version.type;
sst_drv_ctx->fw_version.major = init->fw_version.major;
sst_drv_ctx->fw_version.minor = init->fw_version.minor;
sst_drv_ctx->fw_version.build = init->fw_version.build;
ret:
sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);

View File

@ -104,7 +104,7 @@ int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
str_id, alloc_param.operation, 0);
dev_info(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
dev_dbg(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
str_id, pipe_id);
ret = sst_prepare_and_post_msg(sst_drv_ctx, task_id, IPC_CMD,
IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param),
@ -415,7 +415,7 @@ int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
str_info->status = STREAM_UN_INIT;
mutex_unlock(&str_info->lock);
dev_info(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
dev_dbg(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
str_id, str_info->pipe_id);
retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
IPC_IA_FREE_STREAM_MRFLD, str_info->pipe_id, 0,

View File

@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <asm/div64.h>
@ -338,7 +337,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
kthread_queue_work(&ipc->kworker, &ipc->kwork);
schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}

View File

@ -126,7 +126,7 @@ static int broadwell_rt286_hw_params(struct snd_pcm_substream *substream,
return ret;
}
static struct snd_soc_ops broadwell_rt286_ops = {
static const struct snd_soc_ops broadwell_rt286_ops = {
.hw_params = broadwell_rt286_hw_params,
};

View File

@ -30,6 +30,7 @@
#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
#define BXT_MAXIM_CODEC_DAI "HiFi"
#define DUAL_CHANNEL 2
#define QUAD_CHANNEL 4
static struct snd_soc_jack broxton_headset;
@ -182,6 +183,16 @@ static struct snd_pcm_hw_constraint_list constraints_channels = {
.mask = 0,
};
static unsigned int channels_quad[] = {
QUAD_CHANNEL,
};
static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
.count = ARRAY_SIZE(channels_quad),
.list = channels_quad,
.mask = 0,
};
static int bxt_fe_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@ -258,7 +269,10 @@ static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
channels->min = channels->max = DUAL_CHANNEL;
if (params_channels(params) == 2)
channels->min = channels->max = 2;
else
channels->min = channels->max = 4;
return 0;
}
@ -267,9 +281,9 @@ static int broxton_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
runtime->hw.channels_max = DUAL_CHANNEL;
runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&constraints_channels);
&constraints_channels_quad);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@ -348,7 +362,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dynamic = 1,
.ops = &broxton_refcap_ops,
},
[BXT_DPCM_AUDIO_DMIC_CP]
[BXT_DPCM_AUDIO_DMIC_CP] =
{
.name = "Bxt Audio DMIC cap",
.stream_name = "dmiccap",

View File

@ -57,9 +57,7 @@ struct byt_rt5640_private {
struct clk *mclk;
};
static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
BYT_RT5640_DMIC_EN |
BYT_RT5640_MCLK_EN;
static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN;
static void log_quirks(struct device *dev)
{
@ -689,6 +687,10 @@ static bool is_valleyview(void)
return true;
}
struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
u64 aif_value; /* 1: AIF1, 2: AIF2 */
u64 mclock_value; /* usually 25MHz (0x17d7940), ignored */
};
static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
{
@ -698,6 +700,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
int i;
int dai_index;
struct byt_rt5640_private *priv;
bool is_bytcr = false;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv)
@ -734,10 +737,61 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
struct sst_platform_info *p_info = mach->pdata;
const struct sst_res_info *res_info = p_info->res_info;
/* TODO: use CHAN package info from BIOS to detect AIF1/AIF2 */
if (res_info->acpi_ipc_irq_index == 0) {
if (res_info->acpi_ipc_irq_index == 0)
is_bytcr = true;
}
if (is_bytcr) {
/*
* Baytrail CR platforms may have a CHAN package in the BIOS; try
* to find the relevant routing quirk based on what is done on
* Windows platforms. We have to read the information directly
* from the BIOS: at this stage the card is not created and the
* links with the codec driver/pdata do not exist yet
*/
struct acpi_chan_package chan_package;
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
struct acpi_buffer state = {0, NULL};
struct sst_acpi_package_context pkg_ctx;
bool pkg_found = false;
state.length = sizeof(chan_package);
state.pointer = &chan_package;
pkg_ctx.name = "CHAN";
pkg_ctx.length = 2;
pkg_ctx.format = &format;
pkg_ctx.state = &state;
pkg_ctx.data_valid = false;
pkg_found = sst_acpi_find_package_from_hid(mach->id, &pkg_ctx);
if (pkg_found) {
if (chan_package.aif_value == 1) {
dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF1;
} else if (chan_package.aif_value == 2) {
dev_info(&pdev->dev, "BIOS Routing: AIF2 connected\n");
byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
} else {
dev_info(&pdev->dev, "BIOS Routing isn't valid, ignored\n");
pkg_found = false;
}
}
if (!pkg_found) {
/* no BIOS indications, assume SSP0-AIF2 connection */
byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
}
/* change defaults for Baytrail-CR capture */
byt_rt5640_quirk |= BYT_RT5640_IN1_MAP;
byt_rt5640_quirk |= BYT_RT5640_DIFF_MIC;
} else {
byt_rt5640_quirk |= (BYT_RT5640_DMIC1_MAP |
BYT_RT5640_DMIC_EN);
}
/* check quirks before creating card */

View File

@ -25,12 +25,14 @@
#include <sound/jack.h>
#include "../../codecs/rt5670.h"
#include "../atom/sst-atom-controls.h"
#include "../common/sst-acpi.h"
/* The platform clock #3 outputs 19.2 MHz clock to codec as I2S MCLK */
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5670-aif1"
static struct snd_soc_jack cht_bsw_headset;
static char cht_bsw_codec_name[16];
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
@ -339,9 +341,33 @@ static struct snd_soc_card snd_soc_card_cht = {
.resume_post = cht_resume_post,
};
#define RT5672_I2C_DEFAULT "i2c-10EC5670:00"
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
struct sst_acpi_mach *mach = pdev->dev.platform_data;
const char *i2c_name;
int i;
strcpy(cht_bsw_codec_name, RT5672_I2C_DEFAULT);
/* fixup codec name based on HID */
if (mach) {
i2c_name = sst_acpi_find_name_from_hid(mach->id);
if (i2c_name) {
snprintf(cht_bsw_codec_name, sizeof(cht_bsw_codec_name),
"i2c-%s", i2c_name);
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
if (!strcmp(cht_dailink[i].codec_name,
RT5672_I2C_DEFAULT)) {
cht_dailink[i].codec_name =
cht_bsw_codec_name;
break;
}
}
}
}
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;

View File

@ -81,9 +81,9 @@ static struct snd_soc_jack_zone mfld_zones[] = {
};
/* sound card controls */
static const char *headset_switch_text[] = {"Earpiece", "Headset"};
static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
static const struct soc_enum headset_enum =
SOC_ENUM_SINGLE_EXT(2, headset_switch_text);

View File

@ -15,14 +15,29 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
/* translation from HID to I2C name, needed for DAI codec_name */
struct sst_acpi_package_context {
char *name; /* package name */
int length; /* number of elements */
struct acpi_buffer *format;
struct acpi_buffer *state;
bool data_valid;
};
#if IS_ENABLED(CONFIG_ACPI)
/* translation from HID to I2C name, needed for DAI codec_name */
const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct sst_acpi_package_context *ctx);
#else
static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
{
return NULL;
}
static inline bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct sst_acpi_package_context *ctx)
{
return false;
}
#endif
/* acpi match */

View File

@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <sound/asound.h>
#include "sst-dsp.h"
@ -109,10 +108,9 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);
list_add_tail(&msg->list, &ipc->tx_list);
schedule_work(&ipc->kwork);
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
kthread_queue_work(&ipc->kworker, &ipc->kwork);
if (wait)
return tx_wait_done(ipc, msg, rx_data);
else
@ -156,42 +154,56 @@ static int msg_empty_list_init(struct sst_generic_ipc *ipc)
return -ENOMEM;
}
static void ipc_tx_msgs(struct kthread_work *work)
static void ipc_tx_msgs(struct work_struct *work)
{
struct sst_generic_ipc *ipc =
container_of(work, struct sst_generic_ipc, kwork);
struct ipc_message *msg;
unsigned long flags;
spin_lock_irqsave(&ipc->dsp->spinlock, flags);
spin_lock_irq(&ipc->dsp->spinlock);
if (list_empty(&ipc->tx_list) || ipc->pending) {
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
return;
while (!list_empty(&ipc->tx_list) && !ipc->pending) {
/* if the DSP is busy, we will TX messages after IRQ.
* also postpone if we are in the middle of processing
* completion irq
*/
if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
break;
}
msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
list_move(&msg->list, &ipc->rx_list);
if (ipc->ops.tx_msg != NULL)
ipc->ops.tx_msg(ipc, msg);
}
/* if the DSP is busy, we will TX messages after IRQ.
* also postpone if we are in the middle of processing completion irq */
if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
return;
}
msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
list_move(&msg->list, &ipc->rx_list);
if (ipc->ops.tx_msg != NULL)
ipc->ops.tx_msg(ipc, msg);
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
spin_unlock_irq(&ipc->dsp->spinlock);
}
int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
{
return ipc_tx_message(ipc, header, tx_data, tx_bytes,
int ret;
/*
* The DSP may be in a low-power active state, so
* check if the DSP supports the lp-on method and,
* if so, invoke it before sending the IPC
*/
if (ipc->ops.check_dsp_lp_on)
if (ipc->ops.check_dsp_lp_on(ipc->dsp, true))
return -EIO;
ret = ipc_tx_message(ipc, header, tx_data, tx_bytes,
rx_data, rx_bytes, 1);
if (ipc->ops.check_dsp_lp_on)
if (ipc->ops.check_dsp_lp_on(ipc->dsp, false))
return -EIO;
return ret;
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
@ -203,6 +215,14 @@ int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
{
return ipc_tx_message(ipc, header, tx_data, tx_bytes,
rx_data, rx_bytes, 1);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nopm);
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
u64 header)
{
@ -280,19 +300,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
if (ret < 0)
return -ENOMEM;
/* start the IPC message thread */
kthread_init_worker(&ipc->kworker);
ipc->tx_thread = kthread_run(kthread_worker_fn,
&ipc->kworker, "%s",
dev_name(ipc->dev));
if (IS_ERR(ipc->tx_thread)) {
dev_err(ipc->dev, "error: failed to create message TX task\n");
ret = PTR_ERR(ipc->tx_thread);
kfree(ipc->msg);
return ret;
}
kthread_init_work(&ipc->kwork, ipc_tx_msgs);
INIT_WORK(&ipc->kwork, ipc_tx_msgs);
return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
@ -301,8 +309,7 @@ void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
int i;
if (ipc->tx_thread)
kthread_stop(ipc->tx_thread);
cancel_work_sync(&ipc->kwork);
if (ipc->msg) {
for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {

View File

@ -23,7 +23,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#define IPC_MAX_MAILBOX_BYTES 256
@ -52,6 +51,7 @@ struct sst_plat_ipc_ops {
void (*tx_data_copy)(struct ipc_message *, char *, size_t);
u64 (*reply_msg_match)(u64 header, u64 *mask);
bool (*is_dsp_busy)(struct sst_dsp *dsp);
int (*check_dsp_lp_on)(struct sst_dsp *dsp, bool state);
};
/* SST generic IPC data */
@ -65,8 +65,7 @@ struct sst_generic_ipc {
struct list_head empty_list;
wait_queue_head_t wait_txq;
struct task_struct *tx_thread;
struct kthread_worker kworker;
struct kthread_work kwork;
struct work_struct kwork;
bool pending;
struct ipc_message *msg;
int tx_data_max_size;
@ -81,6 +80,9 @@ int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes);
int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes);
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
u64 header);

View File

@ -77,5 +77,62 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
}
EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
static acpi_status sst_acpi_find_package(acpi_handle handle, u32 level,
void *context, void **ret)
{
struct acpi_device *adev;
acpi_status status = AE_OK;
struct sst_acpi_package_context *pkg_ctx = context;
pkg_ctx->data_valid = false;
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
if (adev->status.present && adev->status.functional) {
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *myobj = NULL;
status = acpi_evaluate_object_typed(handle, pkg_ctx->name,
NULL, &buffer,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
return AE_OK;
myobj = buffer.pointer;
if (!myobj || myobj->package.count != pkg_ctx->length) {
kfree(buffer.pointer);
return AE_OK;
}
status = acpi_extract_package(myobj,
pkg_ctx->format, pkg_ctx->state);
if (ACPI_FAILURE(status)) {
kfree(buffer.pointer);
return AE_OK;
}
kfree(buffer.pointer);
pkg_ctx->data_valid = true;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct sst_acpi_package_context *ctx)
{
acpi_status status;
status = acpi_get_devices(hid, sst_acpi_find_package, ctx, NULL);
if (ACPI_FAILURE(status) || !ctx->data_valid)
return false;
return true;
}
EXPORT_SYMBOL_GPL(sst_acpi_find_package_from_hid);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Common ACPI Match module");

View File

@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@ -818,7 +817,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
kthread_queue_work(&ipc->kworker, &ipc->kwork);
schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}

View File

@ -43,6 +43,9 @@
#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
/* Delay before scheduling D0i3 entry */
#define BXT_D0I3_DELAY 5000
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
@ -288,6 +291,141 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
return ret;
}
/*
* Decide the D0i3 state that can be targeted based on the usecase
* ref counts and DSP state
*
* Decision Matrix: (X = don't care; state = target state)
*
* DSP state != SKL_DSP_RUNNING ; state = no d0i3
*
* DSP state == SKL_DSP_RUNNING , the following matrix applies
* non_d0i3 >0; streaming =X; non_streaming =X; state = no d0i3
* non_d0i3 =X; streaming =0; non_streaming =0; state = no d0i3
* non_d0i3 =0; streaming >0; non_streaming =X; state = streaming d0i3
* non_d0i3 =0; streaming =0; non_streaming >0; state = non-streaming d0i3
*/
static int bxt_d0i3_target_state(struct sst_dsp *ctx)
{
struct skl_sst *skl = ctx->thread_context;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)
return SKL_DSP_D0I3_NONE;
if (d0i3->non_d0i3)
return SKL_DSP_D0I3_NONE;
else if (d0i3->streaming)
return SKL_DSP_D0I3_STREAMING;
else if (d0i3->non_streaming)
return SKL_DSP_D0I3_NON_STREAMING;
else
return SKL_DSP_D0I3_NONE;
}
static void bxt_set_dsp_D0i3(struct work_struct *work)
{
int ret;
struct skl_ipc_d0ix_msg msg;
struct skl_sst *skl = container_of(work,
struct skl_sst, d0i3.work.work);
struct sst_dsp *ctx = skl->dsp;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
int target_state;
dev_dbg(ctx->dev, "In %s:\n", __func__);
/* D0i3 entry allowed only if core 0 alone is running */
if (skl_dsp_get_enabled_cores(ctx) != SKL_DSP_CORE0_MASK) {
dev_warn(ctx->dev,
"D0i3 allowed when only core0 running:Exit\n");
return;
}
target_state = bxt_d0i3_target_state(ctx);
if (target_state == SKL_DSP_D0I3_NONE)
return;
msg.instance_id = 0;
msg.module_id = 0;
msg.wake = 1;
msg.streaming = 0;
if (target_state == SKL_DSP_D0I3_STREAMING)
msg.streaming = 1;
ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D0i3 state\n");
return;
}
/* Set Vendor specific register D0I3C.I3 to enable D0i3 */
if (skl->update_d0i3c)
skl->update_d0i3c(skl->dev, true);
d0i3->state = target_state;
skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;
}
static int bxt_schedule_dsp_D0i3(struct sst_dsp *ctx)
{
struct skl_sst *skl = ctx->thread_context;
struct skl_d0i3_data *d0i3 = &skl->d0i3;
/* Schedule D0i3 only if the usecase ref counts are appropriate */
if (bxt_d0i3_target_state(ctx) != SKL_DSP_D0I3_NONE) {
dev_dbg(ctx->dev, "%s: Schedule D0i3\n", __func__);
schedule_delayed_work(&d0i3->work,
msecs_to_jiffies(BXT_D0I3_DELAY));
}
return 0;
}
static int bxt_set_dsp_D0i0(struct sst_dsp *ctx)
{
int ret;
struct skl_ipc_d0ix_msg msg;
struct skl_sst *skl = ctx->thread_context;
dev_dbg(ctx->dev, "In %s:\n", __func__);
/* First Cancel any pending attempt to put DSP to D0i3 */
cancel_delayed_work_sync(&skl->d0i3.work);
/* If DSP is currently in D0i3, bring it to D0i0 */
if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)
return 0;
dev_dbg(ctx->dev, "Set DSP to D0i0\n");
msg.instance_id = 0;
msg.module_id = 0;
msg.streaming = 0;
msg.wake = 0;
if (skl->d0i3.state == SKL_DSP_D0I3_STREAMING)
msg.streaming = 1;
/* Clear Vendor specific register D0I3C.I3 to disable D0i3 */
if (skl->update_d0i3c)
skl->update_d0i3c(skl->dev, false);
ret = skl_ipc_set_d0ix(&skl->ipc, &msg);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set DSP to D0i0\n");
return ret;
}
skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
skl->d0i3.state = SKL_DSP_D0I3_NONE;
return 0;
}
static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *skl = ctx->thread_context;
@ -414,6 +552,8 @@ static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
static struct skl_dsp_fw_ops bxt_fw_ops = {
.set_state_D0 = bxt_set_dsp_D0,
.set_state_D3 = bxt_set_dsp_D3,
.set_state_D0i3 = bxt_schedule_dsp_D0i3,
.set_state_D0i0 = bxt_set_dsp_D0i0,
.load_fw = bxt_load_base_firmware,
.get_fw_errcode = bxt_get_errorcode,
.load_library = bxt_load_library,
@ -470,10 +610,15 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
if (ret)
return ret;
/* set the D0i3 check */
skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;
skl->cores.count = 2;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
skl->is_first_boot = true;
INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
skl->d0i3.state = SKL_DSP_D0I3_NONE;
if (dsp)
*dsp = skl;

View File

@ -294,6 +294,33 @@ int skl_free_dsp(struct skl *skl)
return 0;
}
/*
* In the case of "suspend_active" i.e, the Audio IP being active
* during system suspend, immediately excecute any pending D0i3 work
* before suspending. This is needed for the IP to work in low power
* mode during system suspend. In the case of normal suspend, cancel
* any pending D0i3 work.
*/
int skl_suspend_late_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
struct delayed_work *dwork;
if (!ctx)
return 0;
dwork = &ctx->d0i3.work;
if (dwork->work.func) {
if (skl->supend_active)
flush_delayed_work(dwork);
else
cancel_delayed_work_sync(dwork);
}
return 0;
}
int skl_suspend_dsp(struct skl *skl)
{
struct skl_sst *ctx = skl->skl_sst;
@ -500,16 +527,14 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
{
struct skl_dma_control *dma_ctrl;
struct skl_i2s_config_blob config_blob;
struct skl_ipc_large_config_msg msg = {0};
int err = 0;
/*
* if blob size is same as capability size, then no dma control
* present so return
* if blob size zero, then return
*/
if (mconfig->formats_config.caps_size == sizeof(config_blob))
if (mconfig->formats_config.caps_size == 0)
return 0;
msg.large_param_id = DMA_CONTROL_ID;
@ -523,7 +548,7 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
/* size in dwords */
dma_ctrl->config_length = sizeof(config_blob) / 4;
dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;
memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
mconfig->formats_config.caps_size);
@ -531,7 +556,6 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
kfree(dma_ctrl);
return err;
}
@ -1042,7 +1066,8 @@ int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
pipe->pipe_priority, pipe->ppl_id);
pipe->pipe_priority, pipe->ppl_id,
pipe->lp_mode);
if (ret < 0) {
dev_err(ctx->dev, "Failed to create pipeline\n");
return ret;

View File

@ -144,6 +144,8 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
struct hdac_ext_stream *stream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct skl_dma_params *dma_params;
struct skl *skl = get_skl_ctx(dai->dev);
struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
@ -177,6 +179,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
skl_set_suspend_active(substream, dai, true);
snd_pcm_set_sync(substream);
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
return 0;
}
@ -302,6 +307,7 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
struct skl_dma_params *dma_params = NULL;
struct skl *skl = ebus_to_skl(ebus);
struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
@ -325,6 +331,9 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
skl->skl_sst->miscbdcg_disabled = false;
}
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
skl_tplg_d0i3_put(skl, mconfig->d0i3_caps);
kfree(dma_params);
}
@ -1031,10 +1040,24 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
(struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
struct hdac_ext_bus *ebus = get_bus_ctx(substream);
unsigned int pos;
/* use the position buffer as default */
pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
/*
* Use DPIB for the playback stream, as the periodic DMA Position-in-
* Buffer writes may be scheduled at the same time as, or later than,
* the MSI and are not guaranteed to reflect the position of the
* last buffer that was transferred. The DPIB register in HDA
* space, by contrast, reflects the data actually transferred.
* Use the position buffer for capture, as DPIB write gets
* completed earlier than the actual data written to the DDR.
*/
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
(AZX_REG_VS_SDXDPIB_XINTERVAL *
hdac_stream(hstream)->index));
else
pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
if (pos >= hdac_stream(hstream)->bufsize)
pos = 0;
@ -1197,6 +1220,7 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
return ret;
}
skl_populate_modules(skl);
skl->skl_sst->update_d0i3c = skl_update_d0i3c;
}
pm_runtime_mark_last_busy(platform->dev);
pm_runtime_put_autosuspend(platform->dev);

View File

@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

View File

@ -126,11 +126,21 @@ struct sst_dsp_device;
#define SKL_ADSPCS_CPA_SHIFT 24
#define SKL_ADSPCS_CPA_MASK(cm) ((cm) << SKL_ADSPCS_CPA_SHIFT)
/* DSP Core state */
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
/* Running in D0i3 state; can be in streaming or non-streaming D0i3 */
SKL_DSP_RUNNING_D0I3, /* Running in D0i3 state*/
SKL_DSP_RESET,
};
/* D0i3 substates */
enum skl_dsp_d0i3_states {
SKL_DSP_D0I3_NONE = -1, /* No D0i3 */
SKL_DSP_D0I3_NON_STREAMING = 0,
SKL_DSP_D0I3_STREAMING = 1,
};
struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
@ -139,6 +149,8 @@ struct skl_dsp_fw_ops {
int (*parse_fw)(struct sst_dsp *ctx);
int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
int (*set_state_D0i3)(struct sst_dsp *ctx);
int (*set_state_D0i0)(struct sst_dsp *ctx);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);

View File

@ -81,6 +81,11 @@
#define IPC_INSTANCE_ID(x) (((x) & IPC_INSTANCE_ID_MASK) \
<< IPC_INSTANCE_ID_SHIFT)
#define IPC_PPL_LP_MODE_SHIFT 0
#define IPC_PPL_LP_MODE_MASK 0x1
#define IPC_PPL_LP_MODE(x) (((x) & IPC_PPL_LP_MODE_MASK) \
<< IPC_PPL_LP_MODE_SHIFT)
/* Set pipeline state message */
#define IPC_PPL_STATE_SHIFT 0
#define IPC_PPL_STATE_MASK 0x1F
@ -172,6 +177,17 @@
<< IPC_INITIAL_BLOCK_SHIFT)
#define IPC_INITIAL_BLOCK_CLEAR ~(IPC_INITIAL_BLOCK_MASK \
<< IPC_INITIAL_BLOCK_SHIFT)
/* Set D0ix IPC extension register */
#define IPC_D0IX_WAKE_SHIFT 0
#define IPC_D0IX_WAKE_MASK 0x1
#define IPC_D0IX_WAKE(x) (((x) & IPC_D0IX_WAKE_MASK) \
<< IPC_D0IX_WAKE_SHIFT)
#define IPC_D0IX_STREAMING_SHIFT 1
#define IPC_D0IX_STREAMING_MASK 0x1
#define IPC_D0IX_STREAMING(x) (((x) & IPC_D0IX_STREAMING_MASK) \
<< IPC_D0IX_STREAMING_SHIFT)
enum skl_ipc_msg_target {
IPC_FW_GEN_MSG = 0,
@ -258,7 +274,8 @@ enum skl_ipc_module_msg {
IPC_MOD_LARGE_CONFIG_SET = 4,
IPC_MOD_BIND = 5,
IPC_MOD_UNBIND = 6,
IPC_MOD_SET_DX = 7
IPC_MOD_SET_DX = 7,
IPC_MOD_SET_D0IX = 8
};
static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
@ -289,6 +306,23 @@ static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
header->primary | SKL_ADSP_REG_HIPCI_BUSY);
}
int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state)
{
int ret;
/* check D0i3 support */
if (!dsp->fw_ops.set_state_D0i0)
return 0;
/* Attempt D0i0 or D0i3 based on state */
if (state)
ret = dsp->fw_ops.set_state_D0i0(dsp);
else
ret = dsp->fw_ops.set_state_D0i3(dsp);
return ret;
}
static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
u64 ipc_header)
{
@ -464,7 +498,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
skl_ipc_int_enable(dsp);
/* continue to send any remaining messages... */
kthread_queue_work(&ipc->kworker, &ipc->kwork);
schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
@ -547,7 +581,7 @@ void skl_ipc_free(struct sst_generic_ipc *ipc)
}
int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
@ -560,6 +594,8 @@ int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
header.primary |= IPC_PPL_TYPE(ppl_type);
header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
header.extension = IPC_PPL_LP_MODE(lp_mode);
dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0) {
@ -931,3 +967,32 @@ int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
return ret;
}
EXPORT_SYMBOL_GPL(skl_sst_ipc_load_library);
int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, struct skl_ipc_d0ix_msg *msg)
{
struct skl_ipc_header header = {0};
u64 *ipc_header = (u64 *)(&header);
int ret;
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_D0IX);
header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
header.primary |= IPC_MOD_ID(msg->module_id);
header.extension = IPC_D0IX_WAKE(msg->wake);
header.extension |= IPC_D0IX_STREAMING(msg->streaming);
dev_dbg(ipc->dev, "In %s primary=%x ext=%x\n", __func__,
header.primary, header.extension);
/*
* Use the nopm IPC here as we don't want it checking for D0iX
*/
ret = sst_ipc_tx_message_nopm(ipc, *ipc_header, NULL, 0, NULL, 0);
if (ret < 0)
dev_err(ipc->dev, "ipc: set d0ix failed, err %d\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(skl_ipc_set_d0ix);

View File

@ -16,7 +16,6 @@
#ifndef __SKL_IPC_H
#define __SKL_IPC_H
#include <linux/kthread.h>
#include <linux/irqreturn.h>
#include "../common/sst-ipc.h"
@ -53,6 +52,23 @@ struct skl_dsp_cores {
int usage_count[SKL_DSP_CORES_MAX];
};
/**
* skl_d0i3_data: skl D0i3 counters data struct
*
* @streaming: Count of usecases that can attempt streaming D0i3
* @non_streaming: Count of usecases that can attempt non-streaming D0i3
* @non_d0i3: Count of usecases that cannot attempt D0i3
* @state: current state
* @work: D0i3 worker thread
*/
struct skl_d0i3_data {
int streaming;
int non_streaming;
int non_d0i3;
enum skl_dsp_d0i3_states state;
struct delayed_work work;
};
struct skl_sst {
struct device *dev;
struct sst_dsp *dsp;
@ -83,6 +99,11 @@ struct skl_sst {
/* tplg manifest */
struct skl_dfw_manifest manifest;
/* Callback to update D0i3C register */
void (*update_d0i3c)(struct device *dev, bool enable);
struct skl_d0i3_data d0i3;
};
struct skl_ipc_init_instance_msg {
@ -111,6 +132,13 @@ struct skl_ipc_large_config_msg {
u32 param_data_size;
};
struct skl_ipc_d0ix_msg {
u32 module_id;
u32 instance_id;
u8 streaming;
u8 wake;
};
#define SKL_IPC_BOOT_MSECS 3000
#define SKL_IPC_D3_MASK 0
@ -119,7 +147,7 @@ struct skl_ipc_large_config_msg {
irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode);
int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
@ -155,6 +183,11 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
u8 dma_id, u8 table_id);
int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc,
struct skl_ipc_d0ix_msg *msg);
int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state);
void skl_ipc_int_enable(struct sst_dsp *dsp);
void skl_ipc_op_int_enable(struct sst_dsp *ctx);
void skl_ipc_op_int_disable(struct sst_dsp *ctx);

View File

@ -179,7 +179,7 @@ static inline int skl_getid_32(struct uuid_module *module, u64 *val,
index = ffz(mask_val);
pvt_id = index + word1_mask + word2_mask;
if (pvt_id <= (max_inst - 1)) {
*val |= 1 << (index + word1_mask);
*val |= 1ULL << (index + word1_mask);
return pvt_id;
}
}
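
For context on the 1ULL change above: the shift count (index + word1_mask) can go past bit 31 while *val is 64 bits wide, and shifting a plain 1 (a 32-bit int) by 32 or more is undefined and can never set the upper half of the bitmap. A standalone sketch of the correct pattern (the bit position 40 is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0;
	unsigned int bit = 40;	/* e.g. index + word1_mask landing past bit 31 */

	/*
	 * "val |= 1 << bit" would shift a 32-bit int: undefined for
	 * bit >= 32 and unable to reach bits 32..63 of val.
	 */
	val |= 1ULL << bit;	/* 64-bit shift, sets bit 40 as intended */
	printf("val = %#llx\n", (unsigned long long)val);
	return 0;
}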

View File

@ -36,6 +36,44 @@
#define SKL_IN_DIR_BIT_MASK BIT(0)
#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
switch (caps) {
case SKL_D0I3_NONE:
d0i3->non_d0i3++;
break;
case SKL_D0I3_STREAMING:
d0i3->streaming++;
break;
case SKL_D0I3_NON_STREAMING:
d0i3->non_streaming++;
break;
}
}
void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
switch (caps) {
case SKL_D0I3_NONE:
d0i3->non_d0i3--;
break;
case SKL_D0I3_STREAMING:
d0i3->streaming--;
break;
case SKL_D0I3_NON_STREAMING:
d0i3->non_streaming--;
break;
}
}
/*
* SKL DSP driver modelling uses only a few DAPM widgets; the rest are
* ignored. This helper checks whether the SKL driver handles a given widget type
@ -1519,6 +1557,10 @@ static int skl_tplg_fill_pipe_tkn(struct device *dev,
pipe->memory_pages = tkn_val;
break;
case SKL_TKN_U32_PMODE:
pipe->lp_mode = tkn_val;
break;
default:
dev_err(dev, "Token not handled %d\n", tkn);
return -EINVAL;
@ -1826,6 +1868,10 @@ static int skl_tplg_get_token(struct device *dev,
mconfig->converter = tkn_elem->value;
break;
case SKL_TKL_U32_D0I3_CAPS:
mconfig->d0i3_caps = tkn_elem->value;
break;
case SKL_TKN_U32_PIPE_ID:
ret = skl_tplg_add_pipe(dev,
mconfig, skl, tkn_elem);
@ -1841,6 +1887,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U32_PIPE_CONN_TYPE:
case SKL_TKN_U32_PIPE_PRIORITY:
case SKL_TKN_U32_PIPE_MEM_PGS:
case SKL_TKN_U32_PMODE:
if (is_pipe_exists) {
ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
tkn_elem->token, tkn_elem->value);

View File

@ -113,23 +113,6 @@ struct skl_cpr_gtw_cfg {
u32 config_data[1];
} __packed;
struct skl_i2s_config_blob {
u32 gateway_attrib;
u32 tdm_ts_group[8];
u32 ssc0;
u32 ssc1;
u32 sscto;
u32 sspsp;
u32 sstsa;
u32 ssrsa;
u32 ssc2;
u32 sspsp2;
u32 ssc3;
u32 ssioc;
u32 mdivc;
u32 mdivr;
} __packed;
struct skl_dma_control {
u32 node_id;
u32 config_length;
@ -279,6 +262,7 @@ struct skl_pipe {
u8 pipe_priority;
u16 conn_type;
u32 memory_pages;
u8 lp_mode;
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
struct list_head w_list;
@ -293,6 +277,12 @@ enum skl_module_state {
SKL_MODULE_UNLOADED = 4,
};
enum d0i3_capability {
SKL_D0I3_NONE = 0,
SKL_D0I3_STREAMING = 1,
SKL_D0I3_NON_STREAMING = 2,
};
struct skl_module_cfg {
u8 guid[16];
struct skl_module_inst_id id;
@ -319,6 +309,7 @@ struct skl_module_cfg {
u32 converter;
u32 vbus_id;
u32 mem_pages;
enum d0i3_capability d0i3_caps;
struct skl_module_pin *m_in_pin;
struct skl_module_pin *m_out_pin;
enum skl_module_type m_type;
@ -361,6 +352,9 @@ struct skl_module_cfg *skl_tplg_fe_get_cpr_module(
int skl_tplg_update_pipe_params(struct device *dev,
struct skl_module_cfg *mconfig, struct skl_pipe_params *params);
void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps);
void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps);
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);

View File

@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <sound/pcm.h>
#include "../common/sst-acpi.h"
#include <sound/hda_register.h>
@ -109,6 +110,52 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
return ret;
}
void skl_update_d0i3c(struct device *dev, bool enable)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct hdac_bus *bus = ebus_to_hbus(ebus);
u8 reg;
int timeout = 50;
reg = snd_hdac_chip_readb(bus, VS_D0I3C);
/* Do not write to D0I3C until command in progress bit is cleared */
while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
udelay(10);
reg = snd_hdac_chip_readb(bus, VS_D0I3C);
}
/* Highly unlikely. But if it happens, flag error explicitly */
if (!timeout) {
dev_err(bus->dev, "Before D0I3C update: D0I3C CIP timeout\n");
return;
}
if (enable)
reg = reg | AZX_REG_VS_D0I3C_I3;
else
reg = reg & (~AZX_REG_VS_D0I3C_I3);
snd_hdac_chip_writeb(bus, VS_D0I3C, reg);
timeout = 50;
/* Wait for cmd in progress to be cleared before exiting the function */
reg = snd_hdac_chip_readb(bus, VS_D0I3C);
while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
udelay(10);
reg = snd_hdac_chip_readb(bus, VS_D0I3C);
}
/* Highly unlikely. But if it happens, flag error explicitly */
if (!timeout) {
dev_err(bus->dev, "After D0I3C update: D0I3C CIP timeout\n");
return;
}
dev_dbg(bus->dev, "D0I3C register = 0x%x\n",
snd_hdac_chip_readb(bus, VS_D0I3C));
}
/* called from IRQ */
static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
{
@ -181,6 +228,15 @@ static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
return 0;
}
static int skl_suspend_late(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
return skl_suspend_late_dsp(skl);
}
#ifdef CONFIG_PM
static int _skl_suspend(struct hdac_ext_bus *ebus)
{
@ -243,7 +299,6 @@ static int skl_suspend(struct device *dev)
enable_irq_wake(bus->irq);
pci_save_state(pci);
pci_disable_device(pci);
} else {
ret = _skl_suspend(ebus);
if (ret < 0)
@ -286,7 +341,6 @@ static int skl_resume(struct device *dev)
*/
if (skl->supend_active) {
pci_restore_state(pci);
ret = pci_enable_device(pci);
snd_hdac_ext_bus_link_power_up_all(ebus);
disable_irq_wake(bus->irq);
/*
@ -345,6 +399,7 @@ static int skl_runtime_resume(struct device *dev)
static const struct dev_pm_ops skl_pm = {
SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
.suspend_late = skl_suspend_late,
};
/*

View File

@ -52,6 +52,9 @@
#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
#define AZX_CGCTL_MISCBDCGE_MASK (1 << 6)
/* D0I3C Register fields */
#define AZX_REG_VS_D0I3C_CIP 0x1 /* Command in progress */
#define AZX_REG_VS_D0I3C_I3 0x4 /* D0i3 enable */
struct skl_dsp_resource {
u32 max_mcps;
@ -121,8 +124,11 @@ int skl_get_dmic_geo(struct skl *skl);
int skl_nhlt_update_topology_bin(struct skl *skl);
int skl_init_dsp(struct skl *skl);
int skl_free_dsp(struct skl *skl);
int skl_suspend_late_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
void skl_cleanup_resources(struct skl *skl);
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
void skl_update_d0i3c(struct device *dev, bool enable);
#endif /* __SOUND_SOC_SKL_H */