qmsi: update qmsi to 1.1 alpha

Change-Id: Ib35ebcb32954f764ef8e33f6a1c11ad9f63931bc
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
Anas Nashif 2016-05-18 07:09:16 -04:00 committed by Anas Nashif
commit f35d6e04e3
98 changed files with 13374 additions and 3329 deletions

View file

@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <init.h>
@ -27,8 +26,9 @@
#include <arch/cpu.h>
#include <atomic.h>
#include "qm_isr.h"
#include "qm_adc.h"
#include "qm_scss.h"
#include "clk.h"
enum {
ADC_STATE_IDLE,
@ -42,24 +42,24 @@ struct adc_info {
struct nano_sem sem;
};
static void adc_config_irq(void);
static qm_adc_config_t cfg;
#if (CONFIG_ADC_QMSI_INTERRUPT)
static struct adc_info *adc_context;
static void adc_config_irq(void);
static void complete_callback(void)
static void complete_callback(void *data, int error, qm_adc_status_t status,
qm_adc_cb_source_t source)
{
if (adc_context) {
if (error) {
adc_context->state = ADC_STATE_ERROR;
}
device_sync_call_complete(&adc_context->sync);
}
}
static void error_callback(void)
{
if (adc_context) {
adc_context->state = ADC_STATE_ERROR;
device_sync_call_complete(&adc_context->sync);
}
}
#endif
static void adc_lock(struct adc_info *data)
{
@ -110,29 +110,28 @@ static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
{
int i, ret = 0;
qm_adc_xfer_t xfer;
qm_adc_config_t cfg;
struct adc_info *info = dev->driver_data;
if (qm_adc_get_config(QM_ADC_0, &cfg) != QM_RC_OK) {
return -ENOTSUP;
}
for (i = 0; i < seq_tbl->num_entries; i++) {
xfer.ch = (qm_adc_channel_t *)&seq_tbl->entries[i].channel_id;
/* Just one channel at the time using the Zephyr sequence table */
xfer.ch_len = 1;
xfer.samples = (uint32_t *)seq_tbl->entries[i].buffer;
/* buffer length (bytes) the number of samples, the QMSI Driver does
* not allow more than QM_ADC_FIFO_LEN samples at the time in polling
* mode, if that happens, the qm_adc_convert api will return with an
* error
/* Just one channel at the time using the Zephyr sequence table
*/
xfer.samples_len = (seq_tbl->entries[i].buffer_length);
xfer.complete_callback = NULL;
xfer.error_callback = NULL;
xfer.ch_len = 1;
xfer.samples = (qm_adc_sample_t *)seq_tbl->entries[i].buffer;
/* buffer length (bytes) the number of samples, the QMSI Driver
* does not allow more than QM_ADC_FIFO_LEN samples at the time
* in polling mode, if that happens, the qm_adc_convert api will
* return with an error
*/
xfer.samples_len =
(seq_tbl->entries[i].buffer_length)/sizeof(qm_adc_sample_t);
xfer.callback = NULL;
xfer.callback_data = NULL;
cfg.window = seq_tbl->entries[i].sampling_delay;
@ -144,9 +143,10 @@ static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
break;
}
/* Run the conversion, here the function will poll for the samples
* The function will constantly read the status register to check if
* the number of samples required has been captured
/* Run the conversion, here the function will poll for the
* samples. The function will constantly read the status
* register to check if the number of samples required has been
* captured
*/
if (qm_adc_convert(QM_ADC_0, &xfer) != QM_RC_OK) {
ret = -EIO;
@ -165,29 +165,28 @@ static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
{
int i, ret = 0;
qm_adc_xfer_t xfer;
qm_adc_config_t cfg;
struct adc_info *info = dev->driver_data;
if (qm_adc_get_config(QM_ADC_0, &cfg) != QM_RC_OK) {
return -ENOTSUP;
}
for (i = 0; i < seq_tbl->num_entries; i++) {
xfer.ch = (qm_adc_channel_t *)&seq_tbl->entries[i].channel_id;
/* Just one channel at the time using the Zephyr sequence table */
xfer.ch_len = 1;
xfer.samples = (uint32_t *)seq_tbl->entries[i].buffer;
xfer.samples_len = (seq_tbl->entries[i].buffer_length) >> 2;
xfer.complete_callback = complete_callback;
xfer.error_callback = error_callback;
xfer.samples =
(qm_adc_sample_t *)seq_tbl->entries[i].buffer;
xfer.samples_len =
(seq_tbl->entries[i].buffer_length)/sizeof(qm_adc_sample_t);
xfer.callback = complete_callback;
xfer.callback_data = NULL;
cfg.window = seq_tbl->entries[i].sampling_delay;
adc_lock(info);
if (qm_adc_set_config(QM_ADC_0, &cfg) != QM_RC_OK) {
if (qm_adc_set_config(QM_ADC_0, &cfg) != 0) {
ret = -EINVAL;
adc_unlock(info);
break;
@ -200,7 +199,7 @@ static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
* call the complete_callback function once the samples have been
* obtained
*/
if (qm_adc_irq_convert(QM_ADC_0, &xfer) != QM_RC_OK) {
if (qm_adc_irq_convert(QM_ADC_0, &xfer) != 0) {
adc_context = NULL;
ret = -EIO;
adc_unlock(info);
@ -225,11 +224,6 @@ static int adc_qmsi_read(struct device *dev, struct adc_seq_table *seq_tbl)
}
#endif /* CONFIG_ADC_QMSI_POLL */
void adc_qmsi_isr(void *arg)
{
qm_adc_0_isr();
}
static struct adc_driver_api api_funcs = {
.enable = adc_qmsi_enable,
.disable = adc_qmsi_disable,
@ -238,8 +232,6 @@ static struct adc_driver_api api_funcs = {
int adc_qmsi_init(struct device *dev)
{
qm_adc_config_t cfg;
struct adc_info *info = dev->driver_data;
/* Enable the ADC and set the clock divisor */

View file

@ -40,16 +40,13 @@ struct aio_qmsi_cmp_dev_data_t {
struct aio_qmsi_cmp_cb cb[AIO_QMSI_CMP_COUNT];
};
/* Shadow configuration to keep track of changes */
static qm_ac_config_t config;
static int aio_cmp_config(struct device *dev);
static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
{
qm_ac_config_t config;
if (qm_ac_get_config(&config) != QM_RC_OK) {
return -EINVAL;
}
if (index >= AIO_QMSI_CMP_COUNT) {
return -EINVAL;
}
@ -61,7 +58,7 @@ static int aio_qmsi_cmp_disable(struct device *dev, uint8_t index)
config.int_en &= ~(1 << index);
config.power &= ~(1 << index);
if (qm_ac_set_config(&config) != QM_RC_OK) {
if (qm_ac_set_config(&config) != 0) {
return -EINVAL;
}
@ -76,12 +73,6 @@ static int aio_qmsi_cmp_configure(struct device *dev, uint8_t index,
struct aio_qmsi_cmp_dev_data_t *dev_data =
(struct aio_qmsi_cmp_dev_data_t *)dev->driver_data;
qm_ac_config_t config;
if (qm_ac_get_config(&config) != QM_RC_OK) {
return -EINVAL;
}
if (index >= AIO_QMSI_CMP_COUNT) {
return -EINVAL;
}
@ -108,7 +99,7 @@ static int aio_qmsi_cmp_configure(struct device *dev, uint8_t index,
config.int_en |= (1 << index);
config.power |= (1 << index);
if (qm_ac_set_config(&config) != QM_RC_OK) {
if (qm_ac_set_config(&config) != 0) {
return -EINVAL;
}
@ -139,6 +130,14 @@ int aio_qmsi_cmp_init(struct device *dev)
QM_SCSS_CMP->cmp_pwr &= ~INT_COMPARATORS_MASK;
QM_SCSS_CMP->cmp_en &= ~INT_COMPARATORS_MASK;
/* Don't use the QMSI callback */
config.callback = NULL;
/* Get Initial configuration from HW */
config.reference = QM_SCSS_CMP->cmp_ref_sel;
config.polarity = QM_SCSS_CMP->cmp_ref_pol;
config.power = QM_SCSS_CMP->cmp_pwr;
config.int_en = QM_SCSS_CMP->cmp_en;
/* Clear callback pointers */
for (i = 0; i < dev_data->num_cmp; i++) {
dev_data->cb[i].cb = NULL;

View file

@ -25,7 +25,7 @@
static int aon_counter_qmsi_start(struct device *dev)
{
if (qm_aonc_enable(QM_SCSS_AON_0) != QM_RC_OK) {
if (qm_aonc_enable(QM_SCSS_AON_0)) {
return -EIO;
}
@ -41,7 +41,11 @@ static int aon_counter_qmsi_stop(struct device *dev)
static uint32_t aon_counter_qmsi_read(void)
{
return qm_aonc_get_value(QM_SCSS_AON_0);
uint32_t value;
qm_aonc_get_value(QM_SCSS_AON_0, &value);
return value;
}
static int aon_counter_qmsi_set_alarm(struct device *dev,

View file

@ -22,22 +22,17 @@
#include <counter.h>
#include "qm_aon_counters.h"
#include "qm_isr.h"
struct aon_timer_data {
void *callback_user_data;
counter_callback_t timer_callback;
};
static void aonpt_int_callback(void *user_data);
static void aonpt_int_callback(void);
static struct aon_timer_data aonpt_driver_data;
static counter_callback_t user_cb;
static int aon_timer_qmsi_start(struct device *dev)
{
struct aon_timer_data *driver_data = dev->driver_data;
qm_aonpt_config_t qmsi_cfg;
driver_data->timer_callback = NULL;
user_cb = NULL;
qmsi_cfg.callback = NULL;
qmsi_cfg.int_en = false;
@ -45,9 +40,9 @@ static int aon_timer_qmsi_start(struct device *dev)
* the maximum value.
*/
qmsi_cfg.count = 0xffffffff;
qmsi_cfg.callback_data = NULL;
if (qm_aonpt_set_config(QM_SCSS_AON_0, &qmsi_cfg) !=
QM_RC_OK) {
if (qm_aonpt_set_config(QM_SCSS_AON_0, &qmsi_cfg)) {
return -EIO;
}
@ -61,6 +56,7 @@ static int aon_timer_qmsi_stop(struct device *dev)
qmsi_cfg.callback = NULL;
qmsi_cfg.int_en = false;
qmsi_cfg.count = 0;
qmsi_cfg.callback_data = NULL;
qm_aonpt_set_config(QM_SCSS_AON_0, &qmsi_cfg);
@ -69,34 +65,33 @@ static int aon_timer_qmsi_stop(struct device *dev)
static uint32_t aon_timer_qmsi_read(void)
{
return qm_aonpt_get_value(QM_SCSS_AON_0);
uint32_t value;
qm_aonpt_get_value(QM_SCSS_AON_0, &value);
return value;
}
static int aon_timer_qmsi_set_alarm(struct device *dev,
counter_callback_t callback,
uint32_t count, void *user_data)
{
struct aon_timer_data *driver_data = dev->driver_data;
qm_aonpt_config_t qmsi_cfg;
qm_aonpt_get_config(QM_SCSS_AON_0, &qmsi_cfg);
/* Check if timer has been started */
if (qmsi_cfg.count == 0) {
if (QM_SCSS_AON[QM_SCSS_AON_0].aonpt_cfg == 0) {
return -ENOTSUP;
}
driver_data->timer_callback = callback;
driver_data->callback_user_data = user_data;
user_cb = callback;
qmsi_cfg.callback = aonpt_int_callback;
qmsi_cfg.int_en = true;
qmsi_cfg.count = count;
qmsi_cfg.callback_data = user_data;
if (qm_aonpt_set_config(QM_SCSS_AON_0, &qmsi_cfg) !=
QM_RC_OK) {
driver_data->timer_callback = NULL;
driver_data->callback_user_data = NULL;
if (qm_aonpt_set_config(QM_SCSS_AON_0, &qmsi_cfg)) {
user_cb = NULL;
return -EIO;
}
@ -112,14 +107,12 @@ static struct counter_driver_api aon_timer_qmsi_api = {
static int aon_timer_init(struct device *dev)
{
struct aon_timer_data *driver_data = dev->driver_data;
dev->driver_api = &aon_timer_qmsi_api;
driver_data->callback_user_data = NULL;
driver_data->timer_callback = NULL;
user_cb = NULL;
IRQ_CONNECT(QM_IRQ_AONPT_0,
CONFIG_AON_TIMER_IRQ_PRI, qm_aonpt_isr_0,
NULL, IOAPIC_EDGE | IOAPIC_HIGH);
IRQ_CONNECT(QM_IRQ_AONPT_0, CONFIG_AON_TIMER_IRQ_PRI,
qm_aonpt_isr_0, NULL, IOAPIC_EDGE | IOAPIC_HIGH);
irq_enable(QM_IRQ_AONPT_0);
@ -129,17 +122,13 @@ static int aon_timer_init(struct device *dev)
}
DEVICE_AND_API_INIT(aon_timer, CONFIG_AON_TIMER_QMSI_DEV_NAME,
aon_timer_init, &aonpt_driver_data, NULL, SECONDARY,
aon_timer_init, NULL, NULL, SECONDARY,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
(void *)&aon_timer_qmsi_api);
static void aonpt_int_callback(void)
static void aonpt_int_callback(void *user_data)
{
struct device *dev = DEVICE_GET(aon_timer);
struct aon_timer_data *driver_data = dev->driver_data;
if (driver_data->timer_callback) {
(*driver_data->timer_callback)(dev,
driver_data->callback_user_data);
if (user_cb) {
(*user_cb)(DEVICE_GET(aon_timer), user_data);
}
}

View file

@ -163,8 +163,7 @@ static int flash_qmsi_erase(struct device *dev, off_t addr, size_t size)
return -EIO;
}
/* QM_FLASH_PAGE_SIZE--number of 32 bit words */
if (flash_region(addr + size - (QM_FLASH_PAGE_SIZE << 2)) ==
if (flash_region(addr + size - (QM_FLASH_PAGE_SIZE_DWORDS << 2)) ==
QM_FLASH_REGION_NUM) {
return -EIO;
}
@ -189,7 +188,8 @@ static int flash_qmsi_write_protection(struct device *dev, bool enable)
{
qm_flash_config_t qm_cfg;
qm_flash_get_config(QM_FLASH_0, &qm_cfg);
qm_cfg.us_count = CONFIG_SOC_FLASH_QMSI_CLK_COUNT_US;
qm_cfg.wait_states = CONFIG_SOC_FLASH_QMSI_WAIT_STATES;
if (enable) {
qm_cfg.write_disable = QM_FLASH_WRITE_DISABLE;
@ -200,14 +200,6 @@ static int flash_qmsi_write_protection(struct device *dev, bool enable)
qm_flash_set_config(QM_FLASH_0, &qm_cfg);
#if defined(CONFIG_SOC_QUARK_SE)
qm_flash_get_config(QM_FLASH_1, &qm_cfg);
if (enable) {
qm_cfg.write_disable = QM_FLASH_WRITE_DISABLE;
} else {
qm_cfg.write_disable = QM_FLASH_WRITE_ENABLE;
}
qm_flash_set_config(QM_FLASH_1, &qm_cfg);
#endif

View file

@ -24,9 +24,10 @@
#include <sys_io.h>
#include "qm_gpio.h"
#include "qm_scss.h"
#include "gpio_utils.h"
#include "gpio_api_compat.h"
#include "qm_isr.h"
#include "clk.h"
struct gpio_qmsi_config {
qm_gpio_t gpio;
@ -86,7 +87,7 @@ static void gpio_qmsi_callback(struct device *port, uint32_t status)
}
}
static void gpio_qmsi_0_int_callback(uint32_t status)
static void gpio_qmsi_0_int_callback(void *data, uint32_t status)
{
#ifndef CONFIG_GPIO_QMSI_0
return;
@ -98,7 +99,7 @@ static void gpio_qmsi_0_int_callback(uint32_t status)
}
#ifdef CONFIG_GPIO_QMSI_AON
static void gpio_qmsi_aon_int_callback(uint32_t status)
static void gpio_qmsi_aon_int_callback(void *data, uint32_t status)
{
struct device *port = DEVICE_GET(gpio_aon);
@ -126,7 +127,12 @@ static inline void qmsi_pin_config(struct device *port, uint32_t pin, int flags)
*/
qm_gpio_port_config_t cfg = { 0 };
qm_gpio_get_config(gpio, &cfg);
cfg.direction = QM_GPIO[gpio]->gpio_swporta_ddr;
cfg.int_en = QM_GPIO[gpio]->gpio_inten;
cfg.int_type = QM_GPIO[gpio]->gpio_inttype_level;
cfg.int_polarity = QM_GPIO[gpio]->gpio_int_polarity;
cfg.int_debounce = QM_GPIO[gpio]->gpio_debounce;
cfg.int_bothedge = QM_GPIO[gpio]->gpio_int_bothedge;
qmsi_write_bit(&cfg.direction, pin, (flags & GPIO_DIR_MASK));
@ -210,11 +216,13 @@ static inline int gpio_qmsi_read(struct device *port,
{
struct gpio_qmsi_config *gpio_config = port->config->config_info;
qm_gpio_t gpio = gpio_config->gpio;
qm_gpio_state_t state;
if (access_op == GPIO_ACCESS_BY_PIN) {
*value = qm_gpio_read_pin(gpio, pin);
qm_gpio_read_pin(gpio, pin, &state);
*value = state;
} else {
*value = qm_gpio_read_port(gpio);
qm_gpio_read_port(gpio, (uint32_t *const) &value);
}
return 0;

View file

@ -21,7 +21,8 @@
#include <ioapic.h>
#include "qm_i2c.h"
#include "qm_scss.h"
#include "qm_isr.h"
#include "clk.h"
/* Convenient macros to get the controller instance and the driver data. */
#define GET_CONTROLLER_INSTANCE(dev) \
@ -36,7 +37,7 @@ struct i2c_qmsi_config_info {
struct i2c_qmsi_driver_data {
device_sync_call_t sync;
qm_rc_t transfer_status;
int transfer_status;
};
static int i2c_qmsi_init(struct device *dev);
@ -99,45 +100,23 @@ static int i2c_qmsi_configure(struct device *dev, uint32_t config)
return -EINVAL;
}
if (qm_i2c_set_config(instance, &qm_cfg) != QM_RC_OK)
if (qm_i2c_set_config(instance, &qm_cfg) != 0)
return -EIO;
return 0;
}
static void transfer_complete(uint32_t id, qm_rc_t status)
static void transfer_complete(void *data, int rc, qm_i2c_status_t status,
uint32_t len)
{
struct device *dev;
struct device *dev = (struct device *) data;
struct i2c_qmsi_driver_data *driver_data;
switch (id) {
#ifdef CONFIG_I2C_0
case QM_I2C_0:
dev = DEVICE_GET(i2c_0);
break;
#endif
#ifdef CONFIG_I2C_1
case QM_I2C_1:
dev = DEVICE_GET(i2c_1);
break;
#endif
default:
return;
}
driver_data = GET_DRIVER_DATA(dev);
driver_data->transfer_status = status;
driver_data->transfer_status = rc;
device_sync_call_complete(&driver_data->sync);
}
static void complete_cb(uint32_t id, uint32_t len)
{
transfer_complete(id, QM_RC_OK);
}
static void err_cb(uint32_t id, qm_i2c_status_t status)
{
transfer_complete(id, status);
}
static int i2c_qmsi_transfer(struct device *dev, struct i2c_msg *msgs,
@ -145,44 +124,42 @@ static int i2c_qmsi_transfer(struct device *dev, struct i2c_msg *msgs,
{
struct i2c_qmsi_driver_data *driver_data = GET_DRIVER_DATA(dev);
qm_i2c_t instance = GET_CONTROLLER_INSTANCE(dev);
qm_rc_t rc;
qm_i2c_status_t status;
int rc;
if (qm_i2c_get_status(instance) != QM_I2C_IDLE)
qm_i2c_get_status(instance, &status);
if (status != QM_I2C_IDLE)
return -EBUSY;
if (msgs == NULL || num_msgs == 0)
return -ENOTSUP;
for (int i = 0; i < num_msgs; i++) {
uint8_t *buf = msgs[i].buf;
uint32_t len = msgs[i].len;
uint8_t op = msgs[i].flags & I2C_MSG_RW_MASK;
bool stop = (msgs[i].flags & I2C_MSG_STOP) == I2C_MSG_STOP;
qm_i2c_transfer_t xfer = { 0 };
if (op == I2C_MSG_WRITE) {
xfer.tx = buf;
xfer.tx_len = len;
xfer.tx_callback = complete_cb;
xfer.tx = msgs[i].buf;
xfer.tx_len = msgs[i].len;
} else {
xfer.rx = buf;
xfer.rx_len = len;
xfer.rx_callback = complete_cb;
xfer.rx = msgs[i].buf;
xfer.rx_len = msgs[i].len;
}
xfer.id = instance;
xfer.callback = transfer_complete;
xfer.callback_data = dev;
xfer.stop = stop;
xfer.err_callback = err_cb;
rc = qm_i2c_master_irq_transfer(instance, &xfer, addr);
if (rc != QM_RC_OK)
if (rc != 0)
return -EIO;
/* Block current thread until the I2C transfer completes. */
device_sync_call_wait(&driver_data->sync);
if (driver_data->transfer_status != QM_RC_OK)
return -EIO;
if (stop || op != I2C_MSG_WRITE) {
device_sync_call_wait(&driver_data->sync);
if (driver_data->transfer_status != 0) {
return -EIO;
}
}
}
return 0;

View file

@ -28,7 +28,7 @@ static int pinmux_dev_set(struct device *dev, uint32_t pin,
{
ARG_UNUSED(dev);
return qm_pmux_select(pin, func) == QM_RC_OK ? 0 : -EIO;
return qm_pmux_select(pin, func) == 0 ? 0 : -EIO;
}
static int pinmux_dev_get(struct device *dev, uint32_t pin,
@ -70,7 +70,7 @@ static int pinmux_dev_pullup(struct device *dev, uint32_t pin,
{
ARG_UNUSED(dev);
return qm_pmux_pullup_en(pin, func) == QM_RC_OK ? 0 : -EIO;
return qm_pmux_pullup_en(pin, func) == 0 ? 0 : -EIO;
}
static int pinmux_dev_input(struct device *dev, uint32_t pin,
@ -78,7 +78,7 @@ static int pinmux_dev_input(struct device *dev, uint32_t pin,
{
ARG_UNUSED(dev);
return qm_pmux_input_en(pin, func) == QM_RC_OK ? 0 : -EIO;
return qm_pmux_input_en(pin, func) == 0 ? 0 : -EIO;
}
static struct pinmux_driver_api api_funcs = {

View file

@ -22,7 +22,7 @@
#include <init.h>
#include "qm_pwm.h"
#include "qm_scss.h"
#include "clk.h"
static int pwm_qmsi_configure(struct device *dev, int access_op,
uint32_t pwm, int flags)
@ -58,7 +58,7 @@ static int __set_one_port(qm_pwm_t id, uint32_t pwm, uint32_t on, uint32_t off)
cfg.hi_count = on;
cfg.lo_count = off;
if (qm_pwm_set_config(id, pwm, &cfg) != QM_RC_OK) {
if (qm_pwm_set_config(id, pwm, &cfg) != 0) {
return -EIO;
}
/* Enable timer so it starts running and counting */

View file

@ -1,4 +1,4 @@
obj-$(CONFIG_QMSI_BUILTIN) += drivers/qm_scss.o
obj-$(CONFIG_QMSI_BUILTIN) += drivers/clk.o
obj-$(CONFIG_RTC_QMSI) += drivers/qm_rtc.o
obj-$(CONFIG_WDT_QMSI) += drivers/qm_wdt.o
obj-$(CONFIG_I2C_QMSI) += drivers/qm_i2c.o

View file

@ -1,6 +1,15 @@
The sources in this directory are imported from QMSI project
at https://github.com/01org/qmsi.
The current version supported in Zephyr is QMSI 1.0.1. See
https://github.com/01org/qmsi/releases/tag/v1.0.1
Intel® Quark™ Microcontroller Software Interface (QMSI) is a
Hardware Abstraction Layer (HAL) for Intel® Quark™
Microcontroller products. It currently supports the following SoCs:
- Intel® Quark™ D2000 Microcontroller
- Intel® Quark™ SE Microcontroller
The current version supported in Zephyr is QMSI 1.1.0. See
https://github.com/01org/qmsi/releases/tag/v1.1.0
for more details.

View file

@ -0,0 +1,95 @@
#
# Copyright (c) 2016, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
SOC_ROOT_DIR = $(SOC)
SOC_MAKEFILE = $(SOC)
SUPPORTED_SOCS = quark_se \
quark_d2000
SOC ?= $(DEFAULT_SOC)
TARGET ?= $(DEFAULT_TARGET)
TARGETS_quark_d2000 = x86
TARGETS_quark_se = x86 \
sensor
ifeq ($(filter $(SOC),$(SUPPORTED_SOCS)),)
$(error SOC=$(SOC) is not supported. Run 'make help' for help)
endif
SUPPORTED_TARGETS = $(TARGETS_$(SOC))
ifeq ($(filter $(TARGET),$(SUPPORTED_TARGETS)),)
$(error TARGET=$(TARGET) is not supported for $(SOC). Run 'make help' for help)
endif
ifeq ($(TARGET), sensor)
SOC_ROOT_DIR = quark_se
SOC_MAKEFILE = sensor
endif
### Variables
BASE_DIR = ..
HEADERS = $(wildcard $(BASE_DIR)/include/*.h)
HEADERS += $(wildcard $(BASE_DIR)/soc/$(SOC_ROOT_DIR)/include/*.h)
HEADERS += $(wildcard $(BASE_DIR)/drivers/include/*.h)
ifeq ($(TARGET), sensor)
HEADERS += $(wildcard $(BASE_DIR)/drivers/sensor/include/*.h)
endif
EXPORTED_HEADERS += $(addprefix $(LIBQMSI_INCLUDE_DIR)/, $(notdir $(HEADERS)))
### Make includes
include $(BASE_DIR)/base.mk
include $(BASE_DIR)/drivers/drivers.mk
ifeq ($(TARGET), sensor)
include $(BASE_DIR)/drivers/sensor/sensor.mk
endif
include $(BASE_DIR)/soc/$(SOC_ROOT_DIR)/$(SOC_MAKEFILE).mk
include $(BASE_DIR)/soc/$(SOC_ROOT_DIR)/drivers/drivers.mk
SLINK_NAME := $(LIBQMSI_LIB_DIR)/libqmsi.a
$(LIBQMSI_LIB_DIR)/$(LIBQMSI_FILENAME): $(OBJECTS)
$(call mkdir, $(LIBQMSI_LIB_DIR))
$(AR) rcs $@ $?
ifeq ($(wildcard $(SLINK_NAME)),)
ifneq ($(OS),Windows_NT)
$(LN) -s $(LIBQMSI_FILENAME) $(SLINK_NAME)
endif
endif
$(LIBQMSI_INCLUDE_DIR)/%.h: $(HEADERS)
$(call mkdir, $(LIBQMSI_INCLUDE_DIR))
$(call copy, $(filter %/$(notdir $@), $(HEADERS)), $@)
.PHONY: all
all: $(LIBQMSI_LIB_DIR)/$(LIBQMSI_FILENAME) $(EXPORTED_HEADERS)

145
drivers/qmsi/drivers/apic.h Normal file
View file

@ -0,0 +1,145 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __APIC_H__
#define __APIC_H__
#include <stdint.h>
#include "qm_common.h"
#include "qm_soc_regs.h"
#define LAPIC_VECTOR_MASK (0xFF)
static void _ioapic_set_redtbl_entry(unsigned int irq, uint64_t value)
{
unsigned int offset = QM_IOAPIC_REG_REDTBL + (irq * 2);
QM_IOAPIC->ioregsel.reg = offset;
QM_IOAPIC->iowin.reg = value & 0x00000000FFFFFFFF;
QM_IOAPIC->ioregsel.reg = offset + 1;
QM_IOAPIC->iowin.reg = (value & 0xFFFFFFFF00000000) >> 32;
}
/* Get redirection table size */
static __inline__ int _ioapic_get_redtbl_size(void)
{
int max_entry_number;
QM_IOAPIC->ioregsel.reg = QM_IOAPIC_REG_VER;
max_entry_number = (QM_IOAPIC->iowin.reg & 0x00FF0000) >> 16;
return max_entry_number + 1;
}
static uint32_t _ioapic_get_redtbl_entry_lo(unsigned int irq)
{
QM_IOAPIC->ioregsel.reg = QM_IOAPIC_REG_REDTBL + (irq * 2);
return QM_IOAPIC->iowin.reg;
}
static void _ioapic_set_redtbl_entry_lo(unsigned int irq, uint32_t value)
{
QM_IOAPIC->ioregsel.reg = QM_IOAPIC_REG_REDTBL + (irq * 2);
QM_IOAPIC->iowin.reg = value;
}
/*
* Initialize Local and IOAPIC
*/
static __inline__ void apic_init(void)
{
int i;
int size;
/* Enable LAPIC */
QM_LAPIC->svr.reg |= BIT(8);
/* Set up LVT LINT0 to ExtINT and unmask it */
QM_LAPIC->lvtlint0.reg |= (BIT(8) | BIT(9) | BIT(10));
QM_LAPIC->lvtlint0.reg &= ~BIT(16);
/* Clear up any spurious LAPIC interrupts */
QM_LAPIC->eoi.reg = 0;
/* Setup IOAPIC Redirection Table */
size = _ioapic_get_redtbl_size();
for (i = 0; i < size; i++) {
_ioapic_set_redtbl_entry(i, BIT(16));
}
}
static __inline__ void ioapic_register_irq(unsigned int irq,
unsigned int vector)
{
uint32_t value;
value = _ioapic_get_redtbl_entry_lo(irq);
/* Assign vector and set polarity (positive). */
value &= ~LAPIC_VECTOR_MASK;
value |= (vector & LAPIC_VECTOR_MASK);
value &= ~BIT(13);
/* Set trigger mode. */
switch (irq) {
case QM_IRQ_RTC_0:
case QM_IRQ_AONPT_0:
case QM_IRQ_WDT_0:
/* Edge sensitive. */
value &= ~BIT(15);
break;
default:
/* Level sensitive. */
value |= BIT(15);
break;
}
_ioapic_set_redtbl_entry_lo(irq, value);
}
static __inline__ void ioapic_mask_irq(unsigned int irq)
{
uint32_t value = _ioapic_get_redtbl_entry_lo(irq);
value |= BIT(16);
_ioapic_set_redtbl_entry_lo(irq, value);
}
static __inline__ void ioapic_unmask_irq(unsigned int irq)
{
uint32_t value = _ioapic_get_redtbl_entry_lo(irq);
value &= ~BIT(16);
_ioapic_set_redtbl_entry_lo(irq, value);
}
#endif /* __APIC_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,10 +27,21 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_scss.h"
#include "clk.h"
#include "flash_layout.h"
#if (!QM_SENSOR) || (UNIT_TEST)
#include <x86intrin.h>
#endif
#define OSC0_CFG1_SI_FREQ_SEL_MASK (0x00000300)
#define OSC0_CFG1_SI_FREQ_SEL_OFFS (8)
#if (QM_SENSOR) && (!UNIT_TEST)
/* Timestamp counter for Sensor Subsystem is 32bit. */
#define get_ticks() __builtin_arc_lr(QM_SS_TSC_BASE + QM_SS_TIMER_COUNT)
#elif(QM_SENSOR) && (UNIT_TEST)
#define get_ticks() _rdtsc() % ((uint32_t)-1)
#else
/* 64bit Timestamp counter */
#define get_ticks() _rdtsc()
#endif
/* NOTE: Currently user space data / bss section overwrites the ROM data / bss
* sections, so anything that is set in the ROM will be obliterated once we jump
@ -38,10 +49,11 @@
*/
static uint32_t ticks_per_us = SYS_TICKS_PER_US_32MHZ;
qm_rc_t clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
int clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
{
QM_CHECK(div <= CLK_SYS_DIV_NUM, QM_RC_EINVAL);
QM_CHECK(mode <= CLK_SYS_CRYSTAL_OSC, QM_RC_EINVAL);
QM_CHECK(div <= CLK_SYS_DIV_NUM, -EINVAL);
QM_CHECK(mode <= CLK_SYS_CRYSTAL_OSC, -EINVAL);
uint16_t trim = 0;
/* Store system ticks per us */
uint32_t sys_ticks_per_us = 1;
@ -71,23 +83,38 @@ qm_rc_t clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
* versa.
*/
switch (mode) {
case CLK_SYS_HYB_OSC_32MHZ:
case CLK_SYS_HYB_OSC_16MHZ:
case CLK_SYS_HYB_OSC_8MHZ:
case CLK_SYS_HYB_OSC_4MHZ:
/* Calculate the system clock ticks per microsecond */
/* Calculate the system clock ticks per microsecond
* and get the shadowed trim code from the Data Region of Flash.
*/
if (CLK_SYS_HYB_OSC_32MHZ == mode) {
sys_ticks_per_us = SYS_TICKS_PER_US_32MHZ / BIT(div);
trim = QM_FLASH_DATA_TRIM_CODE->osc_trim_32mhz;
} else if (CLK_SYS_HYB_OSC_16MHZ == mode) {
sys_ticks_per_us = SYS_TICKS_PER_US_16MHZ / BIT(div);
trim = QM_FLASH_DATA_TRIM_CODE->osc_trim_16mhz;
} else if (CLK_SYS_HYB_OSC_8MHZ == mode) {
sys_ticks_per_us = SYS_TICKS_PER_US_8MHZ / BIT(div);
trim = QM_FLASH_DATA_TRIM_CODE->osc_trim_8mhz;
} else {
sys_ticks_per_us = SYS_TICKS_PER_US_4MHZ / BIT(div);
trim = QM_FLASH_DATA_TRIM_CODE->osc_trim_4mhz;
}
/*
* Apply trim code for the selected mode if this has been
* written in the soc_data section.
* This is performed in rom on the first boot for each
* available frequency.
* If not present, something went wrong and trim code
* will not be applied.
*/
if ((trim & QM_FLASH_TRIM_PRESENT_MASK) ==
QM_FLASH_TRIM_PRESENT) {
clk_trim_apply(trim);
}
/* Note: Set (calculate if needed) trim code */
/* Select the silicon oscillator frequency */
QM_SCSS_CCU->osc0_cfg1 &= ~OSC0_CFG1_SI_FREQ_SEL_MASK;
QM_SCSS_CCU->osc0_cfg1 |= (mode << OSC0_CFG1_SI_FREQ_SEL_OFFS);
@ -130,30 +157,54 @@ qm_rc_t clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div)
QM_SCSS_CCU->ccu_sys_clk_ctl |= QM_CCU_SYS_CLK_DIV_EN;
ticks_per_us = (sys_ticks_per_us > 0 ? sys_ticks_per_us : 1);
return QM_RC_OK;
return 0;
}
qm_rc_t clk_adc_set_div(const uint16_t div)
int clk_trim_read(uint32_t *const value)
{
QM_CHECK(NULL != value, -EINVAL);
*value = (QM_SCSS_CCU->osc0_cfg1 & OSC0_CFG1_FTRIMOTP_MASK) >>
OSC0_CFG1_FTRIMOTP_OFFS;
return 0;
}
int clk_trim_apply(const uint32_t value)
{
/* Apply trim code */
QM_SCSS_CCU->osc0_cfg1 &= ~OSC0_CFG1_FTRIMOTP_MASK;
QM_SCSS_CCU->osc0_cfg1 |=
(value << OSC0_CFG1_FTRIMOTP_OFFS) & OSC0_CFG1_FTRIMOTP_MASK;
return 0;
}
int clk_adc_set_div(const uint16_t div)
{
#if (QUARK_D2000)
/*
* The driver adds 1 to the value, so to avoid confusion for the user,
* subtract 1 from the input value.
*/
QM_CHECK((div - 1) <= QM_ADC_DIV_MAX, QM_RC_EINVAL);
QM_CHECK((div - 1) <= QM_ADC_DIV_MAX, -EINVAL);
uint32_t reg = QM_SCSS_CCU->ccu_periph_clk_div_ctl0;
reg &= CLK_ADC_DIV_DEF_MASK;
reg |= ((div - 1) << QM_CCU_ADC_CLK_DIV_OFFSET);
QM_SCSS_CCU->ccu_periph_clk_div_ctl0 = reg;
#else
/* TODO this function should only be made available on D2000 */
(void)div;
#endif
return QM_RC_OK;
return 0;
}
qm_rc_t clk_periph_set_div(const clk_periph_div_t div)
int clk_periph_set_div(const clk_periph_div_t div)
{
QM_CHECK(div <= CLK_PERIPH_DIV_8, QM_RC_EINVAL);
QM_CHECK(div <= CLK_PERIPH_DIV_8, -EINVAL);
#if (QUARK_D2000)
uint32_t reg =
@ -170,12 +221,12 @@ qm_rc_t clk_periph_set_div(const clk_periph_div_t div)
QM_SCSS_CCU->ccu_periph_clk_div_ctl0 |= QM_CCU_PERIPH_PCLK_DIV_EN;
#endif
return QM_RC_OK;
return 0;
}
qm_rc_t clk_gpio_db_set_div(const clk_gpio_db_div_t div)
int clk_gpio_db_set_div(const clk_gpio_db_div_t div)
{
QM_CHECK(div <= CLK_GPIO_DB_DIV_128, QM_RC_EINVAL);
QM_CHECK(div <= CLK_GPIO_DB_DIV_128, -EINVAL);
uint32_t reg =
QM_SCSS_CCU->ccu_gpio_db_clk_ctl & CLK_GPIO_DB_DIV_DEF_MASK;
@ -184,12 +235,12 @@ qm_rc_t clk_gpio_db_set_div(const clk_gpio_db_div_t div)
/* CLK Div en bit must be written from 0 -> 1 to apply new value */
QM_SCSS_CCU->ccu_gpio_db_clk_ctl |= QM_CCU_GPIO_DB_CLK_DIV_EN;
return QM_RC_OK;
return 0;
}
qm_rc_t clk_ext_set_div(const clk_ext_div_t div)
int clk_ext_set_div(const clk_ext_div_t div)
{
QM_CHECK(div <= CLK_EXT_DIV_8, QM_RC_EINVAL);
QM_CHECK(div <= CLK_EXT_DIV_8, -EINVAL);
uint32_t reg = QM_SCSS_CCU->ccu_ext_clock_ctl & CLK_EXTERN_DIV_DEF_MASK;
reg |= (div << QM_CCU_EXTERN_DIV_OFFSET);
@ -197,12 +248,12 @@ qm_rc_t clk_ext_set_div(const clk_ext_div_t div)
/* CLK Div en bit must be written from 0 -> 1 to apply new value */
QM_SCSS_CCU->ccu_ext_clock_ctl |= QM_CCU_EXT_CLK_DIV_EN;
return QM_RC_OK;
return 0;
}
qm_rc_t clk_rtc_set_div(const clk_rtc_div_t div)
int clk_rtc_set_div(const clk_rtc_div_t div)
{
QM_CHECK(div <= CLK_RTC_DIV_32768, QM_RC_EINVAL);
QM_CHECK(div <= CLK_RTC_DIV_32768, -EINVAL);
uint32_t reg = QM_SCSS_CCU->ccu_sys_clk_ctl & CLK_RTC_DIV_DEF_MASK;
reg |= (div << QM_CCU_RTC_CLK_DIV_OFFSET);
@ -210,25 +261,25 @@ qm_rc_t clk_rtc_set_div(const clk_rtc_div_t div)
/* CLK Div en bit must be written from 0 -> 1 to apply new value */
QM_SCSS_CCU->ccu_sys_clk_ctl |= QM_CCU_RTC_CLK_DIV_EN;
return QM_RC_OK;
return 0;
}
qm_rc_t clk_periph_enable(const clk_periph_t clocks)
int clk_periph_enable(const clk_periph_t clocks)
{
QM_CHECK(clocks <= CLK_PERIPH_ALL, QM_RC_EINVAL);
QM_CHECK(clocks <= CLK_PERIPH_ALL, -EINVAL);
QM_SCSS_CCU->ccu_periph_clk_gate_ctl |= clocks;
return QM_RC_OK;
return 0;
}
qm_rc_t clk_periph_disable(const clk_periph_t clocks)
int clk_periph_disable(const clk_periph_t clocks)
{
QM_CHECK(clocks <= CLK_PERIPH_ALL, QM_RC_EINVAL);
QM_CHECK(clocks <= CLK_PERIPH_ALL, -EINVAL);
QM_SCSS_CCU->ccu_periph_clk_gate_ctl &= ~clocks;
return QM_RC_OK;
return 0;
}
uint32_t clk_sys_get_ticks_per_us(void)
@ -239,10 +290,13 @@ uint32_t clk_sys_get_ticks_per_us(void)
/*
 * Busy-wait for at least the given number of microseconds.
 *
 * Polls the system tick counter (get_ticks()) until
 * ticks_per_us * microseconds ticks have elapsed since entry.
 *
 * Note: this body removes the stale pre-merge _rdtsc() path that the diff
 * residue left alongside the new get_ticks() implementation (duplicate
 * assignment and duplicate while condition).
 *
 * NOTE(review): `timeout` is computed in 32 bits, so very long delays at
 * high tick rates could overflow — confirm callers stay within range.
 *
 * @param[in] microseconds Minimum number of microseconds to delay for.
 */
void clk_sys_udelay(uint32_t microseconds)
{
	uint32_t timeout = ticks_per_us * microseconds;
#if (QM_SENSOR)
	/* The sensor subsystem tick counter is 32 bits wide. */
	uint32_t tsc_start;
#else
	unsigned long long tsc_start;
#endif

	tsc_start = get_ticks();

	/* We need to wait until timeout system clock ticks has occurred. */
	while (get_ticks() - tsc_start < timeout) {
	}
}

397
drivers/qmsi/drivers/dma.h Normal file
View file

@ -0,0 +1,397 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DMA_H_
#define __DMA_H_
#include <errno.h>
#include "clk.h"
#include "qm_dma.h"
/* Timeout definitions */
#define STANDARD_TIMEOUT_MICROSECOND (1000)
#define ONE_MICROSECOND (1)
/* Set specific register bits */
/*
 * Update the register bits selected by `mask` with `value` shifted to
 * `offset`, leaving all other bits unchanged.
 *
 * Fixes: the original expanded to `{ ... } while (0)` — the block and the
 * `while (0)` were two separate statements, so `if (c) UPDATE_REG_BITS(...);
 * else ...` failed to compile.  Use the proper do/while(0) idiom and
 * parenthesize the arguments against operator-precedence surprises.
 */
#define UPDATE_REG_BITS(reg, value, offset, mask)                              \
	do {                                                                   \
		reg &= ~(mask);                                                \
		reg |= ((value) << (offset));                                  \
	} while (0)
/* Mask for all supported channels */
#define CHANNEL_MASK_ALL (BIT(QM_DMA_CHANNEL_NUM) - 1)
/*
* DMA Transfer Type
*/
typedef enum {
QM_DMA_TYPE_SINGLE = 0x0, /**< Single block mode. */
} dma_transfer_type_t;
/*
* DMA address increment type.
*/
typedef enum {
QM_DMA_ADDRESS_INCREMENT = 0x0, /**< Increment address. */
QM_DMA_ADDRESS_DECREMENT = 0x1, /**< Decrement address. */
QM_DMA_ADDRESS_NO_CHANGE = 0x2 /**< Don't modify address. */
} qm_dma_address_increment_t;
/*
* DMA channel private structure.
*/
typedef struct dma_cfg_prv_t {
/* DMA client context to be passed back with callbacks */
void *callback_context;
/* DMA channel transfer callback */
void (*client_callback)(void *callback_context, uint32_t len,
int error_code);
} dma_cfg_prv_t;
/*
 * Return the current length of the channel's transfer, in bytes.
 *
 * The block_ts field counts items of the source transfer width, so the raw
 * count is converted to bytes by shifting.  Assumes the SRC_TR_WIDTH
 * encoding equals log2(bytes per item) — this matches the shift below.
 */
static __inline__ uint32_t
get_transfer_length(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
{
	uint32_t transfer_length;
	uint32_t source_transfer_width;
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	/* Read the source transfer width register value. */
	source_transfer_width =
	    ((chan_reg->ctrl_low & QM_DMA_CTL_L_SRC_TR_WIDTH_MASK) >>
	     QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET);

	/* Read the length from the block_ts field. The units of this field
	 * are dependent on the source transfer width. */
	transfer_length = ((chan_reg->ctrl_high & QM_DMA_CTL_H_BLOCK_TS_MASK) >>
			   QM_DMA_CTL_H_BLOCK_TS_OFFSET);

	/* To convert this to bytes the transfer length can be shifted using
	 * the source transfer width value. This value corresponds to the
	 * shifts required and so this can be done as an optimization. */
	return (transfer_length << source_transfer_width);
}
/*
 * Disable the DMA controller by clearing the global config register, then
 * read the register back to confirm the hardware accepted the write.
 *
 * Returns 0 on success, -EIO if the controller is still reported enabled.
 */
static __inline__ int dma_controller_disable(const qm_dma_t dma)
{
	volatile qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;

	misc_reg->cfg_low = 0;
	if (misc_reg->cfg_low) {
		return -EIO;
	}

	return 0;
}
/* Enable the DMA controller by setting the global DMA enable bit. */
static __inline__ void dma_controller_enable(const qm_dma_t dma)
{
	QM_DMA[dma]->misc_reg.cfg_low = QM_DMA_MISC_CFG_DMA_EN;
}
/*
 * Gracefully disable a DMA channel.
 *
 * Sequence: suspend the channel, wait for the suspend to take effect, wait
 * for the channel FIFO to drain, then clear the channel enable bit and wait
 * for the hardware to confirm.  Each wait is bounded by
 * STANDARD_TIMEOUT_MICROSECOND polls of one microsecond.
 *
 * Returns 0 on success (or if the channel was already disabled), -EIO if
 * any of the three waits times out.
 *
 * NOTE(review): on a timeout the channel is left suspended — CH_SUSP is
 * only cleared on the success path below.  Confirm this is intended.
 */
static int dma_channel_disable(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	uint8_t channel_mask = BIT(channel_id);
	uint16_t timeout_us;
	volatile qm_dma_misc_reg_t *misc_reg = &QM_DMA[dma]->misc_reg;
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	/* If the channel is already disabled return */
	if (!(misc_reg->chan_en_low & channel_mask)) {
		return 0;
	}

	/* Suspend the channel */
	chan_reg->cfg_low |= QM_DMA_CFG_L_CH_SUSP_MASK;

	/* Ensure that the channel has been suspended */
	timeout_us = STANDARD_TIMEOUT_MICROSECOND;
	while ((!(chan_reg->cfg_low & QM_DMA_CFG_L_CH_SUSP_MASK)) &&
	       timeout_us) {
		clk_sys_udelay(ONE_MICROSECOND);
		timeout_us--;
	}

	if (!(chan_reg->cfg_low & QM_DMA_CFG_L_CH_SUSP_MASK)) {
		return -EIO;
	}

	/* Wait until the fifo is empty */
	timeout_us = STANDARD_TIMEOUT_MICROSECOND;
	while ((!(chan_reg->cfg_low & QM_DMA_CFG_L_FIFO_EMPTY_MASK)) &&
	       timeout_us) {
		clk_sys_udelay(ONE_MICROSECOND);
		timeout_us--;
	}

	if (!(chan_reg->cfg_low & QM_DMA_CFG_L_FIFO_EMPTY_MASK)) {
		return -EIO;
	}

	/* Disable the channel and wait to confirm that it has been disabled. */
	misc_reg->chan_en_low = (channel_mask << QM_DMA_MISC_CHAN_EN_WE_OFFSET);

	timeout_us = STANDARD_TIMEOUT_MICROSECOND;
	while ((misc_reg->chan_en_low & channel_mask) && timeout_us) {
		clk_sys_udelay(ONE_MICROSECOND);
		timeout_us--;
	}

	if (misc_reg->chan_en_low & channel_mask) {
		return -EIO;
	}

	/* Set the channel to resume */
	chan_reg->cfg_low &= ~QM_DMA_CFG_L_CH_SUSP_MASK;

	return 0;
}
/*
 * Enable a DMA channel.  The channel bit is written together with its
 * corresponding write-enable bit (shifted by QM_DMA_MISC_CHAN_EN_WE_OFFSET),
 * so only this channel's enable is affected.
 */
static __inline__ void dma_channel_enable(const qm_dma_t dma,
					  const qm_dma_channel_id_t channel_id)
{
	uint8_t channel_mask = BIT(channel_id);

	QM_DMA[dma]->misc_reg.chan_en_low =
	    (channel_mask << QM_DMA_MISC_CHAN_EN_WE_OFFSET) | channel_mask;
}
/* Mask interrupt generation for the channel (clear INT_EN in CTL_L). */
static __inline__ void
dma_interrupt_disable(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	chan_reg->ctrl_low &= ~QM_DMA_CTL_L_INT_EN_MASK;
}
/* Unmask interrupt generation for the channel (set INT_EN in CTL_L). */
static __inline__ void
dma_interrupt_enable(const qm_dma_t dma, const qm_dma_channel_id_t channel_id)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	chan_reg->ctrl_low |= QM_DMA_CTL_L_INT_EN_MASK;
}
/*
 * Configure the channel transfer type.
 *
 * Only single-block transfers are supported: the linked-list pointer and
 * the LLP/reload enables for both source and destination are cleared.
 *
 * Returns 0 on success, -EINVAL for an unsupported transfer type.
 */
static __inline__ int
dma_set_transfer_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
		      const dma_transfer_type_t transfer_type)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	/* Currently only single block is supported */
	switch (transfer_type) {
	case QM_DMA_TYPE_SINGLE:
		chan_reg->llp_low = 0x0;
		chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_SRC_EN_MASK;
		chan_reg->ctrl_low &= ~QM_DMA_CTL_L_LLP_DST_EN_MASK;
		chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_SRC_MASK;
		chan_reg->cfg_low &= ~QM_DMA_CFG_L_RELOAD_DST_MASK;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* Program the channel's source transfer width field (SRC_TR_WIDTH, CTL_L). */
static __inline__ void
dma_set_source_transfer_width(const qm_dma_t dma,
			      const qm_dma_channel_id_t channel_id,
			      const qm_dma_transfer_width_t transfer_width)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_width,
			QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET,
			QM_DMA_CTL_L_SRC_TR_WIDTH_MASK);
}
/* Program the channel's destination transfer width field (DST_TR_WIDTH,
 * CTL_L). */
static __inline__ void
dma_set_destination_transfer_width(const qm_dma_t dma,
				   const qm_dma_channel_id_t channel_id,
				   const qm_dma_transfer_width_t transfer_width)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_width,
			QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET,
			QM_DMA_CTL_L_DST_TR_WIDTH_MASK);
}
/* Program the channel's source burst length field (SRC_MSIZE, CTL_L). */
static __inline__ void
dma_set_source_burst_length(const qm_dma_t dma,
			    const qm_dma_channel_id_t channel_id,
			    const qm_dma_burst_length_t burst_length)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, burst_length,
			QM_DMA_CTL_L_SRC_MSIZE_OFFSET,
			QM_DMA_CTL_L_SRC_MSIZE_MASK);
}
/* Program the channel's destination burst length field (DEST_MSIZE, CTL_L). */
static __inline__ void
dma_set_destination_burst_length(const qm_dma_t dma,
				 const qm_dma_channel_id_t channel_id,
				 const qm_dma_burst_length_t burst_length)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, burst_length,
			QM_DMA_CTL_L_DEST_MSIZE_OFFSET,
			QM_DMA_CTL_L_DEST_MSIZE_MASK);
}
/* Program the channel's transfer type / flow control field (TT_FC, CTL_L). */
static __inline__ void
dma_set_transfer_direction(const qm_dma_t dma,
			   const qm_dma_channel_id_t channel_id,
			   const qm_dma_channel_direction_t transfer_direction)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, transfer_direction,
			QM_DMA_CTL_L_TT_FC_OFFSET, QM_DMA_CTL_L_TT_FC_MASK);
}
/* Program the channel's source address increment mode (SINC, CTL_L). */
static __inline__ void
dma_set_source_increment(const qm_dma_t dma,
			 const qm_dma_channel_id_t channel_id,
			 const qm_dma_address_increment_t address_increment)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, address_increment,
			QM_DMA_CTL_L_SINC_OFFSET, QM_DMA_CTL_L_SINC_MASK);
}
/* Program the channel's destination address increment mode (DINC, CTL_L). */
static __inline__ void dma_set_destination_increment(
    const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
    const qm_dma_address_increment_t address_increment)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_low, address_increment,
			QM_DMA_CTL_L_DINC_OFFSET, QM_DMA_CTL_L_DINC_MASK);
}
/*
 * Program the hardware handshake interface number for the channel.  The
 * same interface is written to both the source (SRC_PER) and destination
 * (DEST_PER) fields of CFG_H.
 */
static __inline__ void dma_set_handshake_interface(
    const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
    const qm_dma_handshake_interface_t handshake_interface)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->cfg_high, handshake_interface,
			QM_DMA_CFG_H_SRC_PER_OFFSET, QM_DMA_CFG_H_SRC_PER_MASK);

	UPDATE_REG_BITS(chan_reg->cfg_high, handshake_interface,
			QM_DMA_CFG_H_DEST_PER_OFFSET,
			QM_DMA_CFG_H_DEST_PER_MASK);
}
/*
 * Program the handshake select (hardware vs. software) for the channel.
 * The same value is written to both the source (HS_SEL_SRC) and destination
 * (HS_SEL_DST) fields of CFG_L.
 */
static __inline__ void
dma_set_handshake_type(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
		       const uint8_t handshake_type)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->cfg_low, handshake_type,
			QM_DMA_CFG_L_HS_SEL_SRC_OFFSET,
			QM_DMA_CFG_L_HS_SEL_SRC_MASK);

	UPDATE_REG_BITS(chan_reg->cfg_low, handshake_type,
			QM_DMA_CFG_L_HS_SEL_DST_OFFSET,
			QM_DMA_CFG_L_HS_SEL_DST_MASK);
}
/*
 * Program the handshake polarity for the channel.  The same polarity is
 * written to both the source (SRC_HS_POL) and destination (DST_HS_POL)
 * fields of CFG_L.
 */
static __inline__ void
dma_set_handshake_polarity(const qm_dma_t dma,
			   const qm_dma_channel_id_t channel_id,
			   const qm_dma_handshake_polarity_t handshake_polarity)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->cfg_low, handshake_polarity,
			QM_DMA_CFG_L_SRC_HS_POL_OFFSET,
			QM_DMA_CFG_L_SRC_HS_POL_MASK);

	UPDATE_REG_BITS(chan_reg->cfg_low, handshake_polarity,
			QM_DMA_CFG_L_DST_HS_POL_OFFSET,
			QM_DMA_CFG_L_DST_HS_POL_MASK);
}
static __inline__ void
dma_set_source_address(const qm_dma_t dma, const qm_dma_channel_id_t channel_id,
const uint32_t source_address)
{
QM_DMA[dma]->chan_reg[channel_id].sar_low = source_address;
}
static __inline__ void
dma_set_destination_address(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id,
const uint32_t destination_address)
{
QM_DMA[dma]->chan_reg[channel_id].dar_low = destination_address;
}
/*
 * Program the channel's block transfer size (BLOCK_TS, CTL_H).  The value
 * is counted in items of the source transfer width, not bytes (see
 * get_transfer_length()).
 */
static __inline__ void dma_set_block_size(const qm_dma_t dma,
					  const qm_dma_channel_id_t channel_id,
					  const uint32_t block_size)
{
	volatile qm_dma_chan_reg_t *chan_reg =
	    &QM_DMA[dma]->chan_reg[channel_id];

	UPDATE_REG_BITS(chan_reg->ctrl_high, block_size,
			QM_DMA_CTL_H_BLOCK_TS_OFFSET,
			QM_DMA_CTL_H_BLOCK_TS_MASK);
}
#endif /* __DMA_H_ */

111
drivers/qmsi/drivers/idt.h Normal file
View file

@ -0,0 +1,111 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IDT_H__
#define __IDT_H__
#include <stdint.h>
#include <string.h>
#include "qm_common.h"
#include "qm_soc_regs.h"
#if (QUARK_SE)
#define IDT_NUM_GATES (68)
#elif(QUARK_D2000)
#define IDT_NUM_GATES (52)
#endif
#define IDT_SIZE (sizeof(intr_gate_desc_t) * IDT_NUM_GATES)
typedef struct idtr {
uint16_t limit;
uint32_t base;
} __attribute__((packed)) idtr_t;
typedef struct intr_gate_desc {
uint16_t isr_low;
uint16_t selector; /* Segment selector */
/* The format of conf is the following:
15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
|p |dpl |ss|d |type | unused |
type: Gate type
d: size of Gate
ss: Storage Segment
dpl: Descriptor Privilege level
p: Segment present level
*/
uint16_t conf;
uint16_t isr_high;
} __attribute__((packed)) intr_gate_desc_t;
extern intr_gate_desc_t __idt_start[];
/*
 * Setup IDT gate as an interrupt descriptor and assign the ISR entry point.
 */
static __inline__ void idt_set_intr_gate_desc(uint32_t vector, uint32_t isr)
{
	intr_gate_desc_t *desc;

	desc = __idt_start + vector;

	/* The 32-bit ISR address is split across the low/high halves of the
	 * gate descriptor. */
	desc->isr_low = isr & 0xFFFF;
	desc->selector = 0x08; /* Code segment offset in GDT */

	desc->conf = 0x8E00; /* type: 0b11 (Interrupt)
				d: 1 (32 bits)
				ss: 0
				dpl: 0
				p: 1
			     */
	desc->isr_high = (isr >> 16) & 0xFFFF;
}
/*
 * Initialize the Interrupt Descriptor Table.
 *
 * The IDT is initialized with null descriptors: any interrupt taken at this
 * stage will cause a triple fault.  Gates must be installed afterwards with
 * idt_set_intr_gate_desc().
 */
static __inline__ void idt_init(void)
{
	idtr_t idtr;

	/* All-zero descriptors: present bit clear in every gate. */
	memset(__idt_start, 0x00, IDT_SIZE);

	/* Initialize idtr structure */
	idtr.limit = IDT_SIZE - 1;
	idtr.base = (uint32_t)__idt_start;

	/* Load IDTR register */
	__asm__ __volatile__("lidt %0\n\t" ::"m"(idtr));
}
#endif /* __IDT_H__ */

View file

@ -0,0 +1,307 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CLK_H__
#define __CLK_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
#if (QM_SENSOR)
#include "qm_sensor_regs.h"
#endif
/**
* Clock Management.
*
* @defgroup groupCLK Clock Management
* @{
*/
/**
* When using an external crystal, this value must be set to the number of
* system ticks per micro second. The expected value is 32 ticks for a 32MHz
* crystal.
*/
#define SYS_TICKS_PER_US_XTAL (32)
/** System ticks per microseconds for a 32MHz oscillator. */
#define SYS_TICKS_PER_US_32MHZ (32)
/** System ticks per microseconds for a 16MHz oscillator. */
#define SYS_TICKS_PER_US_16MHZ (16)
/** System ticks per microseconds for a 8MHz oscillator. */
#define SYS_TICKS_PER_US_8MHZ (8)
/** System ticks per microseconds for a 4MHz oscillator. */
#define SYS_TICKS_PER_US_4MHZ (4)
/**
* System clock divider type.
*/
typedef enum {
CLK_SYS_DIV_1, /**< Clock Divider = 1. */
CLK_SYS_DIV_2, /**< Clock Divider = 2. */
CLK_SYS_DIV_4, /**< Clock Divider = 4. */
CLK_SYS_DIV_8, /**< Clock Divider = 8. */
#if (QUARK_D2000)
CLK_SYS_DIV_16, /**< Clock Divider = 16. */
CLK_SYS_DIV_32, /**< Clock Divider = 32. */
CLK_SYS_DIV_64, /**< Clock Divider = 64. */
CLK_SYS_DIV_128, /**< Clock Divider = 128. */
#endif
CLK_SYS_DIV_NUM
} clk_sys_div_t;
/**
* System clock mode type.
*/
typedef enum {
CLK_SYS_HYB_OSC_32MHZ, /**< 32MHz Hybrid Oscillator Clock. */
CLK_SYS_HYB_OSC_16MHZ, /**< 16MHz Hybrid Oscillator Clock. */
CLK_SYS_HYB_OSC_8MHZ, /**< 8MHz Hybrid Oscillator Clock. */
CLK_SYS_HYB_OSC_4MHZ, /**< 4MHz Hybrid Oscillator Clock. */
CLK_SYS_RTC_OSC, /**< Real Time Clock. */
CLK_SYS_CRYSTAL_OSC /**< Crystal Oscillator Clock. */
} clk_sys_mode_t;
/**
* Peripheral clock divider type.
*/
typedef enum {
CLK_PERIPH_DIV_1, /**< Peripheral Clock Divider = 1. */
CLK_PERIPH_DIV_2, /**< Peripheral Clock Divider = 2. */
CLK_PERIPH_DIV_4, /**< Peripheral Clock Divider = 4. */
CLK_PERIPH_DIV_8 /**< Peripheral Clock Divider = 8. */
} clk_periph_div_t;
/**
* GPIO clock debounce divider type.
*/
typedef enum {
CLK_GPIO_DB_DIV_1, /**< GPIO Clock Debounce Divider = 1. */
CLK_GPIO_DB_DIV_2, /**< GPIO Clock Debounce Divider = 2. */
CLK_GPIO_DB_DIV_4, /**< GPIO Clock Debounce Divider = 4. */
CLK_GPIO_DB_DIV_8, /**< GPIO Clock Debounce Divider = 8. */
CLK_GPIO_DB_DIV_16, /**< GPIO Clock Debounce Divider = 16. */
CLK_GPIO_DB_DIV_32, /**< GPIO Clock Debounce Divider = 32. */
CLK_GPIO_DB_DIV_64, /**< GPIO Clock Debounce Divider = 64. */
CLK_GPIO_DB_DIV_128 /**< GPIO Clock Debounce Divider = 128. */
} clk_gpio_db_div_t;
/**
* External crystal clock divider type.
*/
typedef enum {
CLK_EXT_DIV_1, /**< External Crystal Clock Divider = 1. */
CLK_EXT_DIV_2, /**< External Crystal Clock Divider = 2. */
CLK_EXT_DIV_4, /**< External Crystal Clock Divider = 4. */
CLK_EXT_DIV_8 /**< External Crystal Clock Divider = 8. */
} clk_ext_div_t;
/**
* RTC clock divider type.
*/
typedef enum {
CLK_RTC_DIV_1, /**< Real Time Clock Divider = 1. */
CLK_RTC_DIV_2, /**< Real Time Clock Divider = 2. */
CLK_RTC_DIV_4, /**< Real Time Clock Divider = 4. */
CLK_RTC_DIV_8, /**< Real Time Clock Divider = 8. */
CLK_RTC_DIV_16, /**< Real Time Clock Divider = 16. */
CLK_RTC_DIV_32, /**< Real Time Clock Divider = 32. */
CLK_RTC_DIV_64, /**< Real Time Clock Divider = 64. */
CLK_RTC_DIV_128, /**< Real Time Clock Divider = 128. */
CLK_RTC_DIV_256, /**< Real Time Clock Divider = 256. */
CLK_RTC_DIV_512, /**< Real Time Clock Divider = 512. */
CLK_RTC_DIV_1024, /**< Real Time Clock Divider = 1024. */
CLK_RTC_DIV_2048, /**< Real Time Clock Divider = 2048. */
CLK_RTC_DIV_4096, /**< Real Time Clock Divider = 4096. */
CLK_RTC_DIV_8192, /**< Real Time Clock Divider = 8192. */
CLK_RTC_DIV_16384, /**< Real Time Clock Divider = 16384. */
CLK_RTC_DIV_32768 /**< Real Time Clock Divider = 32768. */
} clk_rtc_div_t;
/**
* Set clock mode and divisor.
*
* Change the operating mode and clock divisor of the system
* clock source. Changing this clock speed affects all
* peripherals.
* This applies the correct trim code if available.
*
* If trim code is not available, it is not computed
* and previous trim code is not modified.
*
* @param[in] mode System clock source operating mode.
* @param[in] div System clock divisor.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div);
/**
* Read the silicon oscillator trim code for the current frequency.
*
* @param[out] value Pointer to store the trim code.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_trim_read(uint32_t *const value);
/**
* Apply silicon oscillator trim code.
*
* @param[in] value Trim code to apply.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_trim_apply(const uint32_t value);
/**
* Change divider value of ADC clock.
*
* Change ADC clock divider value. The new divider value is set to N, where N is
* the value set by the function and is between 1 and 1024.
* This function is only available on D2000.
*
* @param[in] div Divider value for the ADC clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_adc_set_div(const uint16_t div);
/**
* Change divider value of peripheral clock.
*
* Change Peripheral clock divider value.
* The maximum divisor is /8.
* Refer to the list of supported peripherals for your SoC.
*
* @param[in] div Divider value for the peripheral clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_periph_set_div(const clk_periph_div_t div);
/**
* Change divider value of GPIO debounce clock.
*
* Change GPIO debounce clock divider value.
* The maximum divisor is /128.
*
* @param[in] div Divider value for the GPIO debounce clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_gpio_db_set_div(const clk_gpio_db_div_t div);
/**
* Change divider value of external clock.
*
* Change External clock divider value.
* The maximum divisor is /8.
*
* @param[in] div Divider value for the external clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_ext_set_div(const clk_ext_div_t div);
/**
* Change divider value of RTC.
*
* Change RTC divider value.
* The maximum divisor is /32768.
*
* @param[in] div Divider value for the RTC.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_rtc_set_div(const clk_rtc_div_t div);
/**
* Enable clocks for peripherals / registers.
*
* @param[in] clocks Which peripheral and register clocks to enable.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_periph_enable(const clk_periph_t clocks);
/**
* Disable clocks for peripherals / registers.
*
* @param[in] clocks Which peripheral and register clocks to disable.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int clk_periph_disable(const clk_periph_t clocks);
/**
* Get number of system ticks per micro second.
*
* @return uint32_t Number of system ticks per micro second.
*/
uint32_t clk_sys_get_ticks_per_us(void);
/**
* Idle loop the processor for at least the value given in microseconds.
*
* This function will wait until at least the given number of microseconds has
* elapsed since calling this function.
*
 * Note:
 * It is dependent on the system clock speed.
 * The delay parameter does not account for the overhead of calling the
 * function, returning from it, calculation setup, or the while loops.
*
* @param[in] microseconds Minimum number of micro seconds to delay for.
*/
void clk_sys_udelay(uint32_t microseconds);
/**
* @}
*/
#endif /* __CLK_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -36,149 +36,258 @@
#if (QUARK_D2000)
/**
* Analog to Digital Converter (ADC) driver for Quark Microcontrollers.
*
* @defgroup groupADC ADC
* @{
*/
* Analog to Digital Converter (ADC).
*
* @defgroup groupADC Quark D2000 ADC
* @{
*/
/**
* ADC Resolution type.
* ADC sample size type.
*/
typedef uint16_t qm_adc_sample_t;
/**
* ADC calibration type.
*/
typedef uint8_t qm_adc_calibration_t;
typedef enum {
QM_ADC_IDLE, /**< ADC idle. */
QM_ADC_COMPLETE, /**< ADC transfer complete. */
QM_ADC_OVERFLOW, /**< ADC FIFO overflow error. */
} qm_adc_status_t;
/**
* ADC resolution type.
*/
typedef enum {
QM_ADC_RES_6_BITS,
QM_ADC_RES_8_BITS,
QM_ADC_RES_10_BITS,
QM_ADC_RES_12_BITS
QM_ADC_RES_6_BITS, /**< 6-bit mode. */
QM_ADC_RES_8_BITS, /**< 8-bit mode. */
QM_ADC_RES_10_BITS, /**< 10-bit mode. */
QM_ADC_RES_12_BITS /**< 12-bit mode. */
} qm_adc_resolution_t;
/**
* ADC operating mode type.
*/
typedef enum {
QM_ADC_MODE_DEEP_PWR_DOWN,
QM_ADC_MODE_PWR_DOWN,
QM_ADC_MODE_STDBY,
QM_ADC_MODE_NORM_CAL,
QM_ADC_MODE_NORM_NO_CAL
QM_ADC_MODE_DEEP_PWR_DOWN, /**< Deep power down mode. */
QM_ADC_MODE_PWR_DOWN, /**< Power down mode. */
QM_ADC_MODE_STDBY, /**< Standby mode. */
QM_ADC_MODE_NORM_CAL, /**< Normal mode, with calibration. */
QM_ADC_MODE_NORM_NO_CAL /**< Normal mode, no calibration. */
} qm_adc_mode_t;
/**
* ADC channels type.
*/
*/
typedef enum {
QM_ADC_CH_0,
QM_ADC_CH_1,
QM_ADC_CH_2,
QM_ADC_CH_3,
QM_ADC_CH_4,
QM_ADC_CH_5,
QM_ADC_CH_6,
QM_ADC_CH_7,
QM_ADC_CH_8,
QM_ADC_CH_9,
QM_ADC_CH_10,
QM_ADC_CH_11,
QM_ADC_CH_12,
QM_ADC_CH_13,
QM_ADC_CH_14,
QM_ADC_CH_15,
QM_ADC_CH_16,
QM_ADC_CH_17,
QM_ADC_CH_18
QM_ADC_CH_0, /**< ADC Channel 0. */
QM_ADC_CH_1, /**< ADC Channel 1. */
QM_ADC_CH_2, /**< ADC Channel 2. */
QM_ADC_CH_3, /**< ADC Channel 3. */
QM_ADC_CH_4, /**< ADC Channel 4. */
QM_ADC_CH_5, /**< ADC Channel 5. */
QM_ADC_CH_6, /**< ADC Channel 6. */
QM_ADC_CH_7, /**< ADC Channel 7. */
QM_ADC_CH_8, /**< ADC Channel 8. */
QM_ADC_CH_9, /**< ADC Channel 9. */
QM_ADC_CH_10, /**< ADC Channel 10. */
QM_ADC_CH_11, /**< ADC Channel 11. */
QM_ADC_CH_12, /**< ADC Channel 12. */
QM_ADC_CH_13, /**< ADC Channel 13. */
QM_ADC_CH_14, /**< ADC Channel 14. */
QM_ADC_CH_15, /**< ADC Channel 15. */
QM_ADC_CH_16, /**< ADC Channel 16. */
QM_ADC_CH_17, /**< ADC Channel 17. */
QM_ADC_CH_18 /**< ADC Channel 18. */
} qm_adc_channel_t;
/**
* ADC interrupt callback source.
*/
typedef enum {
QM_ADC_TRANSFER, /**< Transfer complete or error callback. */
QM_ADC_MODE_CHANGED, /**< Mode change complete callback. */
QM_ADC_CAL_COMPLETE, /**< Calibration complete callback. */
} qm_adc_cb_source_t;
/**
* ADC configuration type.
*/
typedef struct
{
/* Sample interval in ADC clock cycles, defines the period to wait
typedef struct {
/**
* Sample interval in ADC clock cycles, defines the period to wait
* between the start of each sample and can be in the range
* [(resolution+2)-255]. */
* [(resolution+2) - 255].
*/
uint8_t window;
qm_adc_resolution_t resolution; /* 12, 10, 8, 6-bit resolution */
qm_adc_resolution_t resolution; /**< 12, 10, 8, 6-bit resolution. */
} qm_adc_config_t;
/**
* ADC xfer type.
* ADC transfer type.
*/
typedef struct
{
qm_adc_channel_t *ch; /* Channel sequence array (1-32 channels) */
uint32_t ch_len; /* Number of channels in the above array */
uint32_t *samples; /* Array to store samples */
uint32_t samples_len; /* Length of sample array */
void (*complete_callback)(void); /* User callback for interrupt mode */
void (*error_callback)(void); /* User callback for error condition */
} qm_adc_xfer_t;
typedef struct {
qm_adc_channel_t *ch; /**< Channel sequence array (1-32 channels). */
uint8_t ch_len; /**< Number of channels in the above array. */
qm_adc_sample_t *samples; /**< Array to store samples. */
uint32_t samples_len; /**< Length of sample array. */
/**
* ADC Interrupt Service Routine
*/
void qm_adc_0_isr(void);
/**
* Transfer callback.
*
* Called when a conversion is performed or an error is detected.
*
* @param[in] data The callback user data.
* @param[in] error 0 on success.
* Negative @ref errno for possible error codes.
* @param[in] status ADC status.
* @param[in] source Interrupt callback source.
*/
void (*callback)(void *data, int error, qm_adc_status_t status,
qm_adc_cb_source_t source);
void *callback_data; /**< Callback user data. */
} qm_adc_xfer_t;
/**
* Switch operating mode of ADC.
*
* @param [in] adc Which ADC to enable.
* @param [in] mode ADC operating mode.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* This call is blocking.
*
* @param[in] adc Which ADC to enable.
* @param[in] mode ADC operating mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode);
int qm_adc_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode);
/**
* Calibrate the ADC. It is necessary to calibrate if it is intended to use
* Normal Mode With Calibration. The calibration must be performed if the ADC
 * is used for the first time or has been in deep power down mode. This
* call is blocking.
* Switch operating mode of ADC.
*
* @param [in] adc Which ADC to calibrate.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* This call is non-blocking and will call the user callback on completion.
*
* @param[in] adc Which ADC to enable.
* @param[in] mode ADC operating mode.
* @param[in] callback Callback called on completion.
* @param[in] callback_data The callback user data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_calibrate(const qm_adc_t adc);
int qm_adc_irq_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode,
void (*callback)(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source),
void *callback_data);
/**
* Set ADC configuration. This sets the sample window and resolution.
* Calibrate the ADC.
*
* @brief Set ADC configuration.
* @param [in] adc Which ADC to configure.
* @param [in] cfg ADC configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* It is necessary to calibrate if it is intended to use Normal Mode With
* Calibration. The calibration must be performed if the ADC is used for the
* first time or has been in deep power down mode. This call is blocking.
*
* @param[in] adc Which ADC to calibrate.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_set_config(const qm_adc_t adc, const qm_adc_config_t *const cfg);
int qm_adc_calibrate(const qm_adc_t adc);
/**
* Retrieve ADC configuration. This gets the sample window and resolution.
* Calibrate the ADC.
*
* @param [in] adc Which ADC to read the configuration of.
* @param [out] cfg ADC configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* It is necessary to calibrate if it is intended to use Normal Mode With
* Calibration. The calibration must be performed if the ADC is used for the
* first time or has been in deep power down mode. This call is non-blocking
* and will call the user callback on completion.
*
* @param[in] adc Which ADC to calibrate.
* @param[in] callback Callback called on completion.
* @param[in] callback_data The callback user data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_get_config(const qm_adc_t adc, qm_adc_config_t *const cfg);
int qm_adc_irq_calibrate(const qm_adc_t adc,
void (*callback)(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source),
void *callback_data);
/**
* Convert values from the ADC. This blocking call can read 1-32 ADC values
* into the array provided.
* Set ADC calibration data.
*
* @brief Poll based ADC convert
* @param [in] adc Which ADC to read.
* @param [in] xfer Channel and sample info.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] adc Which ADC to set calibration for.
* @param[in] cal_data Calibration data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer);
int qm_adc_set_calibration(const qm_adc_t adc, const qm_adc_calibration_t cal);
/**
* Read current value from ADC channel. Convert values from the ADC, this is a
* non-blocking call and will call the user provided callback after the
* requested number of samples have been converted.
* Get the current calibration data for an ADC.
*
* @brief IRQ based ADC convert
* @param [in] adc Which ADC to read.
* @param [in] xfer Channel, sample and callback info.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] adc Which ADC to get calibration for.
 * @param[out] cal Calibration data. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_adc_irq_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer);
int qm_adc_get_calibration(const qm_adc_t adc, qm_adc_calibration_t *const cal);
/**
* Set ADC configuration.
*
* This sets the sample window and resolution.
*
* @param[in] adc Which ADC to configure.
* @param[in] cfg ADC configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_adc_set_config(const qm_adc_t adc, const qm_adc_config_t *const cfg);
/**
* Synchronously read values from the ADC.
*
* This blocking call can read 1-32 ADC values into the array provided.
*
* @param[in] adc Which ADC to read.
* @param[in,out] xfer Channel and sample info. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_adc_convert(const qm_adc_t adc, qm_adc_xfer_t *const xfer);
/**
* Asynchronously read values from the ADC.
*
* This is a non-blocking call and will call the user provided callback after
* the requested number of samples have been converted.
*
* @param[in] adc Which ADC to read.
 * @param[in,out] xfer Channel, sample and callback info. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_adc_irq_convert(const qm_adc_t adc, qm_adc_xfer_t *const xfer);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,7 +34,7 @@
#include "qm_soc_regs.h"
/**
* Always-on Counters for Quark Mictocontrollers
* Always-on Counters.
*
* @defgroup groupAONC Always-on Counters
* @{
@ -44,110 +44,132 @@
* Always-on Periodic Timer configuration type.
*/
typedef struct {
uint32_t count; /* Time to count down from in clock cycles */
bool int_en; /* Enable/disable the interrupts */
void (*callback)(void); /* Callback function */
} qm_aonpt_config_t;
uint32_t count; /**< Time to count down from in clock cycles.*/
bool int_en; /**< Enable/disable the interrupts. */
/**
* Always-on Periodic Timer Interrupt Service Routine
*/
void qm_aonpt_isr_0(void);
/**
* User callback.
*
* @param[in] data User defined data.
*/
void (*callback)(void *data);
void *callback_data; /**< Callback data. */
} qm_aonpt_config_t;
/**
* Enable the Always-on Counter.
*
* @param[in] aonc Always-on counter to read.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_aonc_enable(const qm_scss_aon_t aonc);
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_aonc_enable(const qm_scss_aon_t aonc);
/**
* Disable the Always-on Counter.
*
* @param[in] aonc Always-on counter to read.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_aonc_disable(const qm_scss_aon_t aonc);
int qm_aonc_disable(const qm_scss_aon_t aonc);
/**
* Get the current value of the Always-on Counter. Returns a 32-bit value which
* represents the number of clock cycles since the counter was first enabled.
* Get the current value of the Always-on Counter.
*
* Returns a 32-bit value which represents the number of clock cycles
* since the counter was first enabled.
*
* @brief Get the current value of the Always-on Counter.
* @param[in] aonc Always-on counter to read.
* @return uint32_t Value of the counter.
* @param[out] val Value of the counter. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
uint32_t qm_aonc_get_value(const qm_scss_aon_t aonc);
int qm_aonc_get_value(const qm_scss_aon_t aonc, uint32_t *const val);
/**
* Set the Always-on Periodic Timer configuration. This includes the initial
* value of the Always-on Periodic Timer, the interrupt enable and the callback
* function that will be run when the timer expiers and an interrupt is
* triggered. The Periodic Timer is disabled if the counter is set to 0.
* Set the Always-on Periodic Timer configuration.
*
* This includes the initial value of the Always-on Periodic Timer,
* the interrupt enable and the callback function that will be run
 * when the timer expires and an interrupt is triggered.
* The Periodic Timer is disabled if the counter is set to 0.
*
* @brief Set the Always-on Periodic Timer configuration.
* @param[in] aonc Always-on counter to read.
* @param[in] cfg New configuration for the Always-on Periodic Timer.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_aonpt_set_config(const qm_scss_aon_t aonc,
const qm_aonpt_config_t *const cfg);
/**
* Get the Always-on Periodic Timer configuration. This includes the initial
 * value of the Always-on Periodic Timer, the interrupt enable and the callback
 * function that will be run when the timer expires and an interrupt is
 * triggered. The Periodic Timer is disabled if the counter is set to 0.
* This must not be NULL.
*
* @brief Get the Always-on Periodic Timer configuration.
* @param[in] aonc Always-on counter to read.
* @param[out] cfg New configuration for the Always-on Periodic Timer.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_aonpt_get_config(const qm_scss_aon_t aonc,
qm_aonpt_config_t *const cfg);
int qm_aonpt_set_config(const qm_scss_aon_t aonc,
const qm_aonpt_config_t *const cfg);
/**
* Get the current value of the Always-on Periodic Timer. Returns a 32-bit value
* which represents the number of clock cycles remaining before the timer fires.
* Get the current value of the Always-on Periodic Timer.
*
* Returns a 32-bit value which represents the number of clock cycles
* remaining before the timer fires.
* This is the initial configured number minus the number of cycles that have
* passed.
*
* @brief Get the current value of the Always-on Periodic Timer.
* @param[in] aonc Always-on counter to read.
* @return uint32_t Value of the Always-on Periodic Timer.
* @param[out] val Value of the Always-on Periodic Timer.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
uint32_t qm_aonpt_get_value(const qm_scss_aon_t aonc);
int qm_aonpt_get_value(const qm_scss_aon_t aonc, uint32_t *const val);
/**
* Get the current status of the Always-on Periodic Timer. Returns true if the
* timer has expired. This will continue to return true until it is cleared with
* qm_aonpt_clear().
* Get the current status of the Always-on Periodic Timer.
*
* Returns true if the timer has expired. This will continue to return true
* until it is cleared with qm_aonpt_clear().
*
* @brief Get the current status of the Always-on Periodic Timer.
* @param[in] aonc Always-on counter to read.
* @return bool Status of the Always-on Periodic Timer.
* @param[out] status Status of the Always-on Periodic Timer.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
bool qm_aonpt_get_status(const qm_scss_aon_t aonc);
int qm_aonpt_get_status(const qm_scss_aon_t aonc, bool *const status);
/**
* Clear the status of the Always-on Periodic Timer. The status must be clear
* before the Always-on Periodic Timer can trigger another interrupt.
* Clear the status of the Always-on Periodic Timer.
*
* The status must be clear before the Always-on Periodic Timer can trigger
* another interrupt.
*
* @brief Clear the status of the Always-on Periodic Timer.
* @param[in] aonc Always-on counter to read.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_aonpt_clear(const qm_scss_aon_t aonc);
int qm_aonpt_clear(const qm_scss_aon_t aonc);
/**
* Reset the Always-on Periodic Timer back to the configured value.
*
* @param[in] aonc Always-on counter to read.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_aonpt_reset(const qm_scss_aon_t aonc);
int qm_aonpt_reset(const qm_scss_aon_t aonc);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,44 +34,43 @@
#include "qm_soc_regs.h"
/**
* Analog comparator for Quark Microcontrollers.
* Analog Comparator.
*
* @defgroup groupAC Analog Comparator
* @{
*/
/**
* Analog comparator configuration type. Each bit in the
* registers controls an Analog Comparator pin.
* Analog Comparator configuration type.
*
* Each bit in the registers controls a single Analog Comparator pin.
*/
typedef struct {
uint32_t int_en; /* Enable/disable comparator interrupt */
uint32_t reference; /* 1b: VREF; 0b: AR_PIN */
uint32_t polarity; /* 0b: input>ref; 1b: input<ref */
uint32_t power; /* 1b: Normal mode; 0b:Power-down/Shutdown mode */
void (*callback)(uint32_t int_status); /* Callback function */
uint32_t int_en; /**< Interrupt enable. */
uint32_t reference; /**< Reference voltage, 1b: VREF; 0b: AR_PIN. */
uint32_t polarity; /**< 0b: input>ref; 1b: input<ref */
uint32_t power; /**< 1b: Normal mode; 0b:Power-down/Shutdown mode */
/**
* Transfer callback.
*
* @param[in] data Callback user data.
* @param[in] status Comparator interrupt status.
*/
void (*callback)(void *data, uint32_t int_status);
void *callback_data; /**< Callback user data. */
} qm_ac_config_t;
/**
* Analog Comparator Interrupt Service Routine
*/
void qm_ac_isr(void);
/**
* Get Analog Comparator configuration.
*
* @param [in] config Analog Comparator configuration.
* @return qm_rc_t QM_RC_OK on success, QM_RC_ERR otherwise.
*/
qm_rc_t qm_ac_get_config(qm_ac_config_t *const config);
/**
* Set Analog Comparator configuration.
*
* @param [in] config Analog Comparator configuration.
* @return qm_rc_t QM_RC_OK on success, QM_RC_ERR otherwise.
* @param[in] config Analog Comparator configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_ac_set_config(const qm_ac_config_t *const config);
int qm_ac_set_config(const qm_ac_config_t *const config);
/**
* @}

View file

@ -0,0 +1,262 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_DMA_H_
#define __QM_DMA_H_
#include "qm_common.h"
#include "qm_soc_regs.h"
/**
* DMA Driver for Quark Microcontrollers.
*
* @defgroup groupDMA DMA
* @{
*/
/**
* DMA Handshake Polarity
*/
typedef enum {
QM_DMA_HANDSHAKE_POLARITY_HIGH = 0x0, /**< Set HS polarity high. */
QM_DMA_HANDSHAKE_POLARITY_LOW = 0x1 /**< Set HS polarity low. */
} qm_dma_handshake_polarity_t;
/**
* DMA Burst Transfer Length
*/
typedef enum {
QM_DMA_BURST_TRANS_LENGTH_1 = 0x0, /**< Burst length 1 data item. */
QM_DMA_BURST_TRANS_LENGTH_4 = 0x1, /**< Burst length 4 data items. */
QM_DMA_BURST_TRANS_LENGTH_8 = 0x2, /**< Burst length 8 data items. */
QM_DMA_BURST_TRANS_LENGTH_16 = 0x3, /**< Burst length 16 data items. */
QM_DMA_BURST_TRANS_LENGTH_32 = 0x4, /**< Burst length 32 data items. */
QM_DMA_BURST_TRANS_LENGTH_64 = 0x5, /**< Burst length 64 data items. */
QM_DMA_BURST_TRANS_LENGTH_128 =
0x6, /**< Burst length 128 data items. */
QM_DMA_BURST_TRANS_LENGTH_256 = 0x7 /**< Burst length 256 data items. */
} qm_dma_burst_length_t;
/**
* DMA Transfer Width
*/
typedef enum {
QM_DMA_TRANS_WIDTH_8 = 0x0, /**< Transfer width of 8 bits. */
QM_DMA_TRANS_WIDTH_16 = 0x1, /**< Transfer width of 16 bits. */
QM_DMA_TRANS_WIDTH_32 = 0x2, /**< Transfer width of 32 bits. */
QM_DMA_TRANS_WIDTH_64 = 0x3, /**< Transfer width of 64 bits. */
QM_DMA_TRANS_WIDTH_128 = 0x4, /**< Transfer width of 128 bits. */
QM_DMA_TRANS_WIDTH_256 = 0x5 /**< Transfer width of 256 bits. */
} qm_dma_transfer_width_t;
/**
* DMA channel direction.
*/
typedef enum {
QM_DMA_MEMORY_TO_MEMORY = 0x0, /**< Memory to memory transfer. */
QM_DMA_MEMORY_TO_PERIPHERAL =
0x1, /**< Memory to peripheral transfer. */
QM_DMA_PERIPHERAL_TO_MEMORY = 0x2 /**< Peripheral to memory transfer. */
} qm_dma_channel_direction_t;
/**
* DMA channel configuration structure
*/
typedef struct {
/** DMA channel handshake interface ID */
qm_dma_handshake_interface_t handshake_interface;
/** DMA channel handshake polarity */
qm_dma_handshake_polarity_t handshake_polarity;
/** DMA channel direction */
qm_dma_channel_direction_t channel_direction;
/** DMA source transfer width */
qm_dma_transfer_width_t source_transfer_width;
/** DMA destination transfer width */
qm_dma_transfer_width_t destination_transfer_width;
/** DMA source burst length */
qm_dma_burst_length_t source_burst_length;
/** DMA destination burst length */
qm_dma_burst_length_t destination_burst_length;
/**
* Client callback for DMA transfer ISR
*
* @param[in] callback_context DMA client context.
* @param[in] len Data length transferred.
* @param[in] error Error code.
*/
void (*client_callback)(void *callback_context, uint32_t len,
int error_code);
/** DMA client context passed to the callbacks */
void *callback_context;
} qm_dma_channel_config_t;
/**
* DMA transfer configuration structure
*/
typedef struct {
uint32_t block_size; /**< DMA block size, Min = 1, Max = 4095. */
uint32_t *source_address; /**< DMA source transfer address. */
uint32_t *destination_address; /**< DMA destination transfer address. */
} qm_dma_transfer_t;
/**
* Initialise the DMA controller.
*
* The DMA controller and channels are first disabled.
* All DMA controller interrupts are masked
 * using the controller's interrupt masking registers. The system
* DMA interrupts are then unmasked. Finally the DMA controller
* is enabled. This function must only be called once as it
* resets the DMA controller and interrupt masking.
*
* @param[in] dma DMA instance.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_init(const qm_dma_t dma);
/**
* Setup a DMA channel configuration.
*
* Configures the channel source width, burst size, channel direction,
* handshaking interface and registers the client callback and callback
* context. qm_dma_init() must first be called before configuring
* a channel. This function only needs to be called once unless
* a channel is being repurposed.
*
* @param[in] dma DMA instance.
* @param[in] channel_id The channel to start.
* @param[in] channel_config The DMA channel configuration as
* defined by the DMA client. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_channel_set_config(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id,
qm_dma_channel_config_t *const channel_config);
/**
* Setup a DMA channel transfer.
*
 * Configure the source address, destination address and block size.
* qm_dma_channel_set_config() must first be called before
* configuring a transfer. qm_dma_transfer_set_config() must
* be called before starting every transfer, even if the
* addresses and block size remain unchanged.
*
* @param[in] dma DMA instance.
* @param[in] channel_id The channel to start.
* @param[in] transfer_config The transfer DMA configuration
* as defined by the dma client.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_transfer_set_config(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id,
qm_dma_transfer_t *const transfer_config);
/**
* Start a DMA transfer.
*
 * qm_dma_transfer_set_config() must first be called
* before starting a transfer.
*
* @param[in] dma DMA instance.
* @param[in] channel_id The channel to start.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_transfer_start(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id);
/**
* Terminate a DMA transfer.
*
* This function is only called if a transfer needs to be terminated manually.
 * This may be required if an expected transfer complete callback
* has not been received. Terminating the transfer will
* trigger the transfer complete callback. The length
* returned by the callback is the transfer length at the
* time that the transfer was terminated.
*
* @param[in] dma DMA instance.
* @param[in] channel_id The channel to stop.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_transfer_terminate(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id);
/**
* Setup and start memory to memory transfer.
*
* This function will setup a memory to memory transfer by
 * calling qm_dma_transfer_set_config() and will then start the
* transfer by calling qm_dma_transfer_start(). This is
* done for consistency across user applications.
*
* @param[in] dma DMA instance.
* @param[in] channel_id The channel to start.
* @param[in] transfer_config The transfer DMA configuration
* as defined by the dma client.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
const qm_dma_channel_id_t channel_id,
qm_dma_transfer_t *const transfer_config);
/**
* @}
*/
#endif /* __QM_DMA_H_ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,31 +34,46 @@
#include "qm_soc_regs.h"
/**
* Flash Controller for Quark Microcontrollers.
* Flash controller.
*
* @brief Flash Controller for QM.
* @defgroup groupFlash Flash
* @{
*/
/** Flash mask to clear timing. */
#define QM_FLASH_TMG_DEF_MASK (0xFFFFFC00)
/** Flash mask to clear micro seconds. */
#define QM_FLASH_MICRO_SEC_COUNT_MASK (0x3F)
/** Flash mask to clear wait state. */
#define QM_FLASH_WAIT_STATE_MASK (0x3C0)
/** Flash wait state offset bit. */
#define QM_FLASH_WAIT_STATE_OFFSET (6)
/** Flash write disable offset bit. */
#define QM_FLASH_WRITE_DISABLE_OFFSET (4)
/** Flash write disable value. */
#define QM_FLASH_WRITE_DISABLE_VAL BIT(4)
#define QM_FLASH_PAGE_SIZE (0x200)
/** Flash page size in dwords. */
#define QM_FLASH_PAGE_SIZE_DWORDS (0x200)
/** Flash page size in bytes. */
#define QM_FLASH_PAGE_SIZE_BYTES (0x800)
/** Flash page size in bits. */
#define QM_FLASH_PAGE_SIZE_BITS (11)
#define ROM_PROG BIT(2)
/** Flash page erase request. */
#define ER_REQ BIT(1)
/** Flash page erase done. */
#define ER_DONE (1)
/** Flash page write request. */
#define WR_REQ (1)
/** Flash page write done. */
#define WR_DONE BIT(1)
/** Flash write address offset. */
#define WR_ADDR_OFFSET (2)
/** Flash perform mass erase includes OTP region. */
#define MASS_ERASE_INFO BIT(6)
/** Flash perform mass erase. */
#define MASS_ERASE BIT(7)
#define QM_FLASH_ADDRESS_MASK (0x7FF)
@ -66,137 +81,162 @@
#define QM_FLASH_ADDR_INC (0x10)
/**
* Flash region enum
* Flash region enum.
*/
typedef enum {
QM_FLASH_REGION_OTP = 0,
QM_FLASH_REGION_SYS,
QM_FLASH_REGION_OTP = 0, /**< Flash OTP region. */
QM_FLASH_REGION_SYS, /**< Flash System region. */
#if (QUARK_D2000)
QM_FLASH_REGION_DATA,
QM_FLASH_REGION_DATA, /**< Flash Data region (Quark D2000 only). */
#endif
QM_FLASH_REGION_NUM
QM_FLASH_REGION_NUM /**< Total number of flash regions. */
} qm_flash_region_t;
/**
* Flash write disable / enable enum
* Flash write disable / enable enum.
*/
typedef enum {
QM_FLASH_WRITE_ENABLE,
QM_FLASH_WRITE_DISABLE
QM_FLASH_WRITE_ENABLE, /**< Flash write enable. */
QM_FLASH_WRITE_DISABLE /**< Flash write disable. */
} qm_flash_disable_t;
/**
* Flash configuration structure
* Flash configuration structure.
*/
typedef struct {
uint8_t wait_states; /**< Read wait state */
uint8_t us_count; /**< Number of clocks in a microsecond */
qm_flash_disable_t write_disable; /**< Write disable */
uint8_t wait_states; /**< Read wait state. */
uint8_t us_count; /**< Number of clocks in a microsecond. */
qm_flash_disable_t write_disable; /**< Write disable. */
} qm_flash_config_t;
/**
* Configure a Flash controller. This includes timing and behavioral settings.
* When switching SoC to a higher frequency, this register must be updated first
* to reflect settings associated with higher frequency BEFORE SoC frequency is
* changed. On the other hand, when switching SoC to a lower frequency, this
* register must be updated only 6 NOP instructions AFTER the SoC frequency has
* been updated. Otherwise, flash timings will be violated.
* Configure a Flash controller.
*
* @brief Configure a Flash controller.
* @param [in] flash Flash controller index.
* @param [in] cfg Flash configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* The configuration includes timing and behavioral settings.
*
* Note: when switching SoC to a higher frequency, flash controllers must be
* reconfigured to reflect settings associated with higher frequency BEFORE SoC
* frequency is changed. On the other hand, when switching SoC to a lower
* frequency, flash controller must be reconfigured only 6 NOP instructions
* AFTER the SoC frequency has been updated. Otherwise, flash timings will be
* violated.
*
* @param[in] flash Flash controller index.
* @param[in] cfg Flash configuration. It must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_flash_set_config(const qm_flash_t flash, qm_flash_config_t *cfg);
int qm_flash_set_config(const qm_flash_t flash,
const qm_flash_config_t *const cfg);
/**
* Retrieve Flash controller configuration. This will set the
* cfg parameter to match the current configuration of the
* given Flash controller.
* Write 4 bytes of data to Flash.
*
* @brief Get Flash controller configuration.
* @param [in] flash Flash controller index.
* @param [out] cfg Flash configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Brownout check is performed before initiating the write.
*
* Note: this function performs a write operation only; page erase may be
* needed if the page is already programmed.
*
* @param[in] flash Flash controller index.
* @param[in] region Flash region to address.
* @param[in] f_addr Address within Flash physical address space.
* @param[in] data Data word to write.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_flash_get_config(const qm_flash_t flash, qm_flash_config_t *cfg);
int qm_flash_word_write(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t f_addr, const uint32_t data);
/**
* Write 4 bytes of data to Flash. Check for brownout before initiating the
* write. Note this function performs a write operation only; page erase
* may be needed if the page is already programmed.
* Write multiple of 4 bytes of data to Flash.
*
* @brief Write 4 bytes of data to Flash.
* @param [in] flash Flash controller index.
* @param [in] region Flash region to address.
* @param [in] f_addr Address within Flash physical address space.
* @param [in] data Data word to write.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Brownout check is performed before initiating the write. The page is erased,
* and then written to.
*
* NOTE: Since this operation may take some time to complete, the caller is
* responsible for ensuring that the watchdog timer does not elapse in the
* meantime (e.g., by restarting it before calling this function).
*
* @param[in] flash Flash controller index.
* @param[in] region Which Flash region to address.
* @param[in] f_addr Address within Flash physical address space.
* @param[in] page_buf Page buffer to store page during update. Must be at
* least QM_FLASH_PAGE_SIZE words big and must not be NULL.
* @param[in] data Data to write (array of words). This must not be NULL.
* @param[in] len Length of data to write (number of words).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_flash_word_write(const qm_flash_t flash, qm_flash_region_t region,
uint32_t f_addr, const uint32_t data);
int qm_flash_page_update(const qm_flash_t flash, const qm_flash_region_t reg,
uint32_t f_addr, uint32_t *const page_buf,
const uint32_t *const data, uint32_t len);
/**
* Write a multiple of 4 bytes of data to Flash.
* Check for brownout before initiating the write.
* The page is erased, and then written to.
* Write a 2KB flash page.
*
* @brief Write multiple of 4 bytes of data to Flash.
* @param [in] flash Flash controller index.
* @param [in] region Which Flash region to address.
* @param [in] f_addr Address within Flash physical address space.
* @param [in] page_buffer Page buffer to store page, must be at least
* QM_FLASH_PAGE_SIZE words big.
* @param [in] data_buffer Data buffer to write.
* @param [in] len Length of data to write.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_flash_page_update(const qm_flash_t flash, qm_flash_region_t region,
uint32_t f_addr, uint32_t *page_buffer,
uint32_t *data_buffer, uint32_t len);
/**
* Write a 2KB page of Flash. Check for brownout before initiating the write.
* The page is erased, and then written to.
* Brownout check is performed before initiating the write. The page is erased,
* and then written to.
*
 * @brief Write a 2KB flash page.
* @param [in] flash Flash controller index.
* @param [in] region Which Flash region to address.
* @param [in] page_num Which page of flash to overwrite.
* @param [in] data Data buffer to write.
* @param [in] len Length of data to write.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* NOTE: Since this operation may take some time to complete, the caller is
* responsible for ensuring that the watchdog timer does not elapse in the
* meantime (e.g., by restarting it before calling this function).
*
* @param[in] flash Flash controller index.
* @param[in] region Which Flash region to address.
* @param[in] page_num Which page of flash to overwrite.
* @param[in] data Data to write (array of words). This must not be NULL.
* @param[in] len Length of data to write (number of words).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
uint32_t page_num, uint32_t *data, uint32_t len);
int qm_flash_page_write(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t page_num, const uint32_t *data, uint32_t len);
/**
* Erase one page of Flash.
* Check for brownout before initiating the write.
*
* @brief Erase one page of Flash.
* @param [in] flash Flash controller index.
* @param [in] region Flash region to address.
* @param [in] page_num Page within the Flash controller to erase.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Brownout check is performed before initiating the write.
*
* @param[in] flash Flash controller index.
* @param[in] region Flash region to address.
* @param[in] page_num Page within the Flash controller to erase.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
/* Having page be 32-bits, saves 6 bytes over using 8 / 16-bits. */
qm_rc_t qm_flash_page_erase(const qm_flash_t flash, qm_flash_region_t region,
uint32_t page_num);
int qm_flash_page_erase(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t page_num);
/**
* Perform Flash mass erase.
* Check for brownout before initiating the erase.
* Performs mass erase on the Flash controller. The mass erase
* may include the ROM region, if present and unlocked.
* Note it is not possible to mass-erase the ROM portion separately.
* Perform mass erase.
*
* @brief Perform mass erase.
* @param [in] flash Flash controller index.
* @param [in] include_rom If set, it also erases the ROM region.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Perform mass erase on the specified flash controller. Brownout check is
* performed before initiating the erase. The mass erase may include the ROM
* region, if present and unlocked. Note: it is not possible to mass-erase the
* ROM portion separately.
*
* NOTE: Since this operation may take some time to complete, the caller is
* responsible for ensuring that the watchdog timer does not elapse in the
* meantime (e.g., by restarting it before calling this function).
*
* @param[in] flash Flash controller index.
* @param[in] include_rom If set, it also erases the ROM region.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_flash_mass_erase(const qm_flash_t flash, uint8_t include_rom);
int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,63 +34,79 @@
#include "qm_soc_regs.h"
/**
* Flash Protection Region control for Quark Microcontrollers.
* Flash Protection Region control.
*
* @brief Flash Protection Region for QM.
* @defgroup groupFPR FPR
* @{
*/
typedef void (*qm_fpr_callback_t)(void);
/**
* FPR Interrupt Service Routines
*/
void qm_fpr_isr_0(void);
void qm_fpr_isr_1(void);
typedef void (*qm_fpr_callback_t)(void *data);
/**
* FPR register map.
*/
typedef enum { QM_FPR_0, QM_FPR_1, QM_FPR_2, QM_FPR_3, QM_FPR_NUM } qm_fpr_id_t;
typedef enum {
QM_FPR_DISABLE,
QM_FPR_ENABLE,
QM_FPR_LOCK_DISABLE,
QM_FPR_LOCK_ENABLE
QM_FPR_0, /**< FPR 0. */
QM_FPR_1, /**< FPR 1. */
QM_FPR_2, /**< FPR 2. */
QM_FPR_3, /**< FPR 3. */
QM_FPR_NUM
} qm_fpr_id_t;
/**
* FPR enable type.
*/
typedef enum {
QM_FPR_DISABLE, /**< Disable FPR. */
QM_FPR_ENABLE, /**< Enable FPR. */
QM_FPR_LOCK_DISABLE, /**< Disable FPR lock. */
QM_FPR_LOCK_ENABLE /**< Enable FPR lock. */
} qm_fpr_en_t;
/**
 * FPR violation mode type.
*/
typedef enum {
FPR_VIOL_MODE_INTERRUPT = 0,
FPR_VIOL_MODE_RESET,
FPR_VIOL_MODE_PROBE
FPR_VIOL_MODE_INTERRUPT = 0, /**< Generate interrupt on violation. */
FPR_VIOL_MODE_RESET, /**< Reset SoC on violation. */
FPR_VIOL_MODE_PROBE /**< Enter probe mode on violation. */
} qm_fpr_viol_mode_t;
/**
* FPR region type.
*/
typedef enum {
QM_MAIN_FLASH_SYSTEM = 0,
QM_MAIN_FLASH_SYSTEM = 0, /**< System flash region. */
#if (QUARK_D2000)
QM_MAIN_FLASH_OTP,
QM_MAIN_FLASH_DATA, /**< Data flash region. */
#endif
QM_MAIN_FLASH_NUM,
QM_MAIN_FLASH_NUM, /**< Number of flash regions. */
} qm_flash_region_type_t;
/**
* FPR read allow type.
*/
typedef enum {
QM_FPR_HOST_PROCESSOR = BIT(0),
QM_FPR_HOST_PROCESSOR =
BIT(0), /**< Allow host processor to access flash region. */
#if (QUARK_SE)
QM_FPR_SENSOR_SUBSYSTEM = BIT(1),
QM_FPR_SENSOR_SUBSYSTEM =
BIT(1), /**< Allow sensor subsystem to access flash region. */
#endif
QM_FPR_DMA = BIT(2),
QM_FPR_DMA = BIT(2), /**< Allow DMA to access flash region. */
#if (QUARK_SE)
QM_FPR_OTHER_AGENTS = BIT(3)
QM_FPR_OTHER_AGENTS =
BIT(3) /**< Allow other agents to access flash region. */
#endif
} qm_fpr_read_allow_t;
/** Flash Protection Region configuration structure */
/**
* Flash Protection Region configuration structure.
*/
typedef struct {
qm_fpr_en_t en_mask; /**< Enable/lock bitmask */
qm_fpr_read_allow_t allow_agents; /**< Per-agent read enable bitmask */
uint8_t up_bound; /**< 1KB-aligned upper Flash phys addr */
uint8_t low_bound; /**< 1KB-aligned lower Flash phys addr */
qm_fpr_en_t en_mask; /**< Enable/lock bitmask. */
qm_fpr_read_allow_t allow_agents; /**< Per-agent read enable bitmask. */
uint8_t up_bound; /**< 1KB-aligned upper Flash phys addr. */
uint8_t low_bound; /**< 1KB-aligned lower Flash phys addr. */
} qm_fpr_config_t;
#define QM_FPR_FPR0_REG_OFFSET (7)
@ -107,59 +123,44 @@ typedef struct {
#define QM_FPR_LOCK BIT(31)
/** FPR enable mask */
#define QM_FPR_EN_MASK_ENABLE BIT(0)
/** FPR mask lock */
#define QM_FPR_EN_MASK_LOCK BIT(1)
/** FPR mask host */
#define QM_FPR_AGENT_MASK_HOST BIT(0)
/** FPR mask ss */
#define QM_FPR_AGENT_MASK_SS BIT(1)
/** FPR mask dma */
#define QM_FPR_AGENT_MASK_DMA BIT(2)
/** FPR mask other agents */
#define QM_FPR_AGENT_MASK_OTHER BIT(3)
/**
* Configure a Flash controller's Flash Protection Region.
*
* @param [in] flash Which Flash controller to configure.
* @param [in] id FPR identifier.
* @param [in] cfg FPR configuration.
* @param [in] region The region of Flash to be configured.
* @return RC_OK on success, error code otherwise.
* @param[in] flash Which Flash controller to configure.
* @param[in] id FPR identifier.
* @param[in] cfg FPR configuration.
* @param[in] region The region of Flash to be configured.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_fpr_set_config(const qm_flash_t flash, const qm_fpr_id_t id,
const qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region);
int qm_fpr_set_config(const qm_flash_t flash, const qm_fpr_id_t id,
const qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region);
/**
* Retrieve Flash controller's Flash Protection Region configuration.
* This will set the cfg parameter to match the current configuration
* of the given Flash controller's FPR.
* Configure FPR violation behaviour.
*
* @brief Get Flash FPR configuration.
* @param [in] flash Which Flash to read the configuration of.
* @param [in] id FPR identifier.
* @param [out] cfg FPR configuration.
* @param [in] region The region of Flash configured.
* @return RC_OK on success, error code otherwise.
* @param[in] mode (generate interrupt, warm reset, enter probe mode).
* @param[in] flash controller.
* @param[in] fpr_cb for interrupt mode (only).
* @param[in] data user callback data for interrupt mode (only).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_fpr_get_config(const qm_flash_t flash, const qm_fpr_id_t id,
qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region);
/**
* Configure FPR violation behaviour
*
* @param [in] mode (generate interrupt, warm reset, enter probe mode).
* @param [in] fpr_cb for interrupt mode (only). This cannot be null.
* @param [in] flash controller.
* @return RC_OK on success, error code otherwise.
* */
qm_rc_t qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash,
qm_fpr_callback_t fpr_cb);
int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash,
qm_fpr_callback_t fpr_cb, void *data);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,107 +34,144 @@
#include "qm_soc_regs.h"
/**
* General Purpose IO for Quark Microcontrollers.
* General Purpose IO.
*
* @defgroup groupGPIO GPIO
* @{
*/
/**
* GPIO port configuration type. Each bit in the registers control a GPIO pin.
*/
* GPIO pin states.
*/
typedef enum {
QM_GPIO_LOW, /**< GPIO low state. */
QM_GPIO_HIGH, /**< GPIO high state. */
QM_GPIO_STATE_NUM /**< Number of GPIO states. */
} qm_gpio_state_t;
/**
* GPIO port configuration type.
*
 * Each bit in the registers controls a GPIO pin.
*/
typedef struct {
uint32_t direction; /* GPIO direction, 0b: input, 1b: output */
uint32_t int_en; /* Interrupt enable */
uint32_t int_type; /* Interrupt type, 0b: level; 1b: edge */
uint32_t int_polarity; /* Interrupt polarity, 0b: low, 1b: high */
uint32_t int_debounce; /* Debounce on/off */
uint32_t int_bothedge; /* Interrupt on both rising and falling edges */
void (*callback)(uint32_t int_status); /* Callback function */
uint32_t direction; /**< GPIO direction, 0b: input, 1b: output. */
uint32_t int_en; /**< Interrupt enable. */
uint32_t int_type; /**< Interrupt type, 0b: level; 1b: edge. */
uint32_t int_polarity; /**< Interrupt polarity, 0b: low, 1b: high. */
uint32_t int_debounce; /**< Interrupt debounce on/off. */
uint32_t int_bothedge; /**< Interrupt on rising and falling edges. */
/**
* Transfer callback.
*
* @param[in] data Callback user data.
* @param[in] int_status GPIO interrupt status.
*/
void (*callback)(void *data, uint32_t int_status);
void *callback_data; /**< Callback user data. */
} qm_gpio_port_config_t;
/**
* GPIO Interrupt Service Routine
*/
void qm_gpio_isr_0(void);
#if (HAS_AON_GPIO)
void qm_aon_gpio_isr_0(void);
#endif /* HAS_AON_GPIO */
/**
* Set GPIO port configuration. This includes if interrupts are enabled or not,
* the level on which an interrupt is generated, the polarity of interrupts and
* if GPIO-debounce is enabled or not. If interrupts are enabled it also
* registers an ISR with the user defined callback function.
* Set GPIO port configuration.
*
* This includes if interrupts are enabled or not, the level on which an
* interrupt is generated, the polarity of interrupts and if GPIO-debounce is
* enabled or not. If interrupts are enabled it also registers the user defined
* callback function.
*
* @brief Set GPIO port configuration.
* @param[in] gpio GPIO port index to configure.
* @param[in] cfg New configuration for GPIO port.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_gpio_set_config(const qm_gpio_t gpio,
const qm_gpio_port_config_t *const cfg);
/**
* Get GPIO port configuration. This includes if interrupts are enabled or not,
* the level on which an interrupt is generated, the polarity of interrupts and
* if GPIO-debounce is enabled or not.
* @param[in] cfg New configuration for GPIO port. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*
* @brief Get GPIO port configuration.
* @param[in] gpio GPIO port index to read the configuration of.
* @param[out] cfg Current configuration for GPIO port.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_gpio_get_config(const qm_gpio_t gpio,
qm_gpio_port_config_t *const cfg);
int qm_gpio_set_config(const qm_gpio_t gpio,
const qm_gpio_port_config_t *const cfg);
/**
* Read the current value of a single pin on a given GPIO port.
* Read the current state of a single pin on a given GPIO port.
*
* @param[in] gpio GPIO port index.
* @param[in] pin Pin of GPIO port to read.
* @return bool Value of the pin specified on GPIO port.
* @param[out] state Current state of the pin. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
bool qm_gpio_read_pin(const qm_gpio_t gpio, const uint8_t pin);
int qm_gpio_read_pin(const qm_gpio_t gpio, const uint8_t pin,
qm_gpio_state_t *const state);
/**
* Set a single pin on a given GPIO port.
*
* @param[in] gpio GPIO port index.
* @param[in] pin Pin of GPIO port to set.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_gpio_set_pin(const qm_gpio_t gpio, const uint8_t pin);
int qm_gpio_set_pin(const qm_gpio_t gpio, const uint8_t pin);
/**
* Clear a single pin on a given GPIO port.
*
* @param[in] gpio GPIO port index.
* @param[in] pin Pin of GPIO port to clear.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return int 0 on success, error code otherwise.
*
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_gpio_clear_pin(const qm_gpio_t gpio, const uint8_t pin);
int qm_gpio_clear_pin(const qm_gpio_t gpio, const uint8_t pin);
/**
* Read entire GPIO port. Each bit of the val parameter is set to the current
* value of each pin on the port. Maximum 32 pins per port.
* Set or clear a single GPIO pin using a state variable.
*
* @brief Get GPIO port values.
* @param[in] gpio GPIO port index.
* @return uint32_t Value of all pins on GPIO port.
* @param[in] pin Pin of GPIO port to update.
* @param[in] state QM_GPIO_LOW for low or QM_GPIO_HIGH for high.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
uint32_t qm_gpio_read_port(const qm_gpio_t gpio);
int qm_gpio_set_pin_state(const qm_gpio_t gpio, const uint8_t pin,
const qm_gpio_state_t state);
/**
* Write entire GPIO port. Each pin on the GPIO port is set to the
* corresponding value set in the val parameter. Maximum 32 pins per port.
* Read the value of every pin on a GPIO port.
*
* Each bit of the val parameter is set to the current value of each pin on the
* port. Maximum 32 pins per port.
*
* @param[in] gpio GPIO port index.
* @param[out] port State of every pin in a GPIO port. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_gpio_read_port(const qm_gpio_t gpio, uint32_t *const port);
/**
* Write a value to every pin on a GPIO port.
*
* Each pin on the GPIO port is set to the corresponding value set in the val
* parameter. Maximum 32 pins per port.
*
* @brief Get GPIO port values.
* @param[in] gpio GPIO port index.
* @param[in] val Value of all pins on GPIO port.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val);
int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -32,208 +32,304 @@
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_dma.h"
/**
* I2C driver for Quark Microcontrollers.
* I2C.
*
* @defgroup groupI2C I2C
* @{
*/
/* High/low period for 50% duty cycle bus clock (in nanoseconds) */
/* High/low period for 50% duty cycle bus clock (in nanoseconds). */
#define QM_I2C_SS_50_DC_NS (5000)
#define QM_I2C_FS_50_DC_NS (1250)
#define QM_I2C_FSP_50_DC_NS (500)
/* Minimum low period to meet timing requirements (in nanoseconds) */
/* Minimum low period to meet timing requirements (in nanoseconds). */
#define QM_I2C_MIN_SS_NS (4700)
#define QM_I2C_MIN_FS_NS (1300)
#define QM_I2C_MIN_FSP_NS (500)
/* Data command register masks and values. */
#define DATA_COMMAND_READ_COMMAND_BYTE (QM_I2C_IC_DATA_CMD_READ >> 8)
#define DATA_COMMAND_STOP_BIT_BYTE (QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL >> 8)
/**
* QM I2C addressing type.
*/
typedef enum { QM_I2C_7_BIT = 0, QM_I2C_10_BIT } qm_i2c_addr_t;
typedef enum{
QM_I2C_7_BIT = 0, /**< 7-bit mode. */
QM_I2C_10_BIT /**< 10-bit mode. */
} qm_i2c_addr_t;
/**
* QM I2C master / slave mode type.
*/
typedef enum { QM_I2C_MASTER, QM_I2C_SLAVE } qm_i2c_mode_t;
typedef enum {
QM_I2C_MASTER, /**< Master mode. */
QM_I2C_SLAVE /**< Slave mode. */
} qm_i2c_mode_t;
/**
* QM I2C Speed Type.
*/
typedef enum {
QM_I2C_SPEED_STD = 1, /* Standard mode (100 Kbps) */
QM_I2C_SPEED_FAST = 2, /* Fast mode (400 Kbps) */
QM_I2C_SPEED_FAST_PLUS = 3 /* Fast plus mode (1 Mbps) */
QM_I2C_SPEED_STD = 1, /**< Standard mode (100 Kbps). */
QM_I2C_SPEED_FAST = 2, /**< Fast mode (400 Kbps). */
QM_I2C_SPEED_FAST_PLUS = 3 /**< Fast plus mode (1 Mbps). */
} qm_i2c_speed_t;
/**
* I2C status type.
*/
typedef enum {
QM_I2C_IDLE = 0,
QM_I2C_TX_ABRT_7B_ADDR_NOACK = BIT(0),
QM_I2C_TX_ABRT_10ADDR1_NOACK = BIT(1),
QM_I2C_TX_ABRT_10ADDR2_NOACK = BIT(2),
QM_I2C_TX_ABRT_TXDATA_NOACK = BIT(3),
QM_I2C_TX_ABRT_GCALL_NOACK = BIT(4),
QM_I2C_TX_ABRT_GCALL_READ = BIT(5),
QM_I2C_TX_ABRT_HS_ACKDET = BIT(6),
QM_I2C_TX_ABRT_SBYTE_ACKDET = BIT(7),
QM_I2C_TX_ABRT_HS_NORSTRT = BIT(8),
QM_I2C_TX_ABRT_10B_RD_NORSTRT = BIT(10),
QM_I2C_TX_ABRT_MASTER_DIS = BIT(11),
QM_I2C_TX_ARB_LOST = BIT(12),
QM_I2C_TX_ABRT_SLVFLUSH_TXFIFO = BIT(13),
QM_I2C_TX_ABRT_SLV_ARBLOST = BIT(14),
QM_I2C_TX_ABRT_SLVRD_INTX = BIT(15),
QM_I2C_TX_ABRT_USER_ABRT = BIT(16),
QM_I2C_BUSY = BIT(17),
QM_I2C_IDLE = 0, /**< Controller idle. */
QM_I2C_TX_ABRT_7B_ADDR_NOACK = BIT(0), /**< 7-bit address noack. */
QM_I2C_TX_ABRT_10ADDR1_NOACK = BIT(1), /**< 10-bit address noack. */
QM_I2C_TX_ABRT_10ADDR2_NOACK = BIT(2), /**< 10-bit second address
byte address noack. */
QM_I2C_TX_ABRT_TXDATA_NOACK = BIT(3), /**< Tx data noack. */
QM_I2C_TX_ABRT_GCALL_NOACK = BIT(4), /**< General call noack. */
QM_I2C_TX_ABRT_GCALL_READ = BIT(5), /**< Read after general call. */
QM_I2C_TX_ABRT_HS_ACKDET = BIT(6), /**< High Speed master ID ACK. */
QM_I2C_TX_ABRT_SBYTE_ACKDET = BIT(7), /**< Start ACK. */
QM_I2C_TX_ABRT_HS_NORSTRT = BIT(8), /**< High Speed with restart
disabled. */
QM_I2C_TX_ABRT_10B_RD_NORSTRT = BIT(10), /**< 10-bit address read and
restart disabled. */
QM_I2C_TX_ABRT_MASTER_DIS = BIT(11), /**< Master disabled. */
QM_I2C_TX_ARB_LOST = BIT(12), /**< Master lost arbitration. */
QM_I2C_TX_ABRT_SLVFLUSH_TXFIFO = BIT(13), /**< Slave flush tx FIFO. */
QM_I2C_TX_ABRT_SLV_ARBLOST = BIT(14), /**< Slave lost bus. */
QM_I2C_TX_ABRT_SLVRD_INTX = BIT(15), /**< Slave read completion. */
QM_I2C_TX_ABRT_USER_ABRT = BIT(16), /**< User abort. */
QM_I2C_BUSY = BIT(17) /**< Controller busy. */
} qm_i2c_status_t;
/**
* I2C configuration type.
*/
typedef struct {
qm_i2c_speed_t speed; /* Standard, Fast Mode */
qm_i2c_addr_t address_mode; /* 7 or 10 bit addressing */
qm_i2c_mode_t mode; /* Master or slave mode */
uint16_t slave_addr; /* I2C address when in slave mode */
qm_i2c_speed_t speed; /**< Standard, Fast Mode. */
qm_i2c_addr_t address_mode; /**< 7 or 10 bit addressing. */
qm_i2c_mode_t mode; /**< Master or slave mode. */
uint16_t slave_addr; /**< I2C address when in slave mode. */
} qm_i2c_config_t;
/**
* I2C transfer type.
* Master mode:
* - if Tx len is 0: perform receive-only transaction
* - if Rx len is 0: perform transmit-only transaction
* - both Tx and Rx len not 0: perform a transmit-then-
* receive combined transaction
* - If tx len is 0: perform receive-only transaction.
* - If rx len is 0: perform transmit-only transaction.
* - Both tx and Rx len not 0: perform a transmit-then-receive
* combined transaction.
* Slave mode:
* - If read or write exceed the buffer, then wrap around.
*/
typedef struct {
uint8_t *tx; /* Write data */
uint32_t tx_len; /* Write data length */
uint8_t *rx; /* Read data */
uint32_t rx_len; /* Read buffer length */
uint32_t id; /* Callback identifier */
bool stop; /* Generate master STOP */
void (*tx_callback)(uint32_t id, uint32_t len); /* Write callback -
required if tx !=
NULL*/
void (*rx_callback)(uint32_t id, uint32_t len); /* Read callback -
required if rx !=
NULL */
void (*err_callback)(uint32_t id,
qm_i2c_status_t status); /* Error callback -
required*/
uint8_t *tx; /**< Write data. */
uint32_t tx_len; /**< Write data length. */
uint8_t *rx; /**< Read data. */
uint32_t rx_len; /**< Read buffer length. */
bool stop; /**< Generate master STOP. */
void (*callback)(void *data, int rc, qm_i2c_status_t status,
uint32_t len); /**< Callback. */
void *callback_data; /**< Callback identifier. */
} qm_i2c_transfer_t;
/**
* I2C 0 Interrupt Service Routine.
*/
void qm_i2c_0_isr(void);
/**
* I2C 1 Interrupt Service Routine.
*/
void qm_i2c_1_isr(void);
/**
* Set I2C configuration.
*
* @param [in] i2c Which I2C to set the configuration of.
* @param [out] cfg I2C configuration.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_i2c_set_config(const qm_i2c_t i2c, const qm_i2c_config_t *const cfg);
/**
* Retrieve I2C configuration.
* @param[in] i2c Which I2C to set the configuration of.
* @param[out] cfg I2C configuration. This must not be NULL.
*
* @param [in] i2c Which I2C to read the configuration of.
* @param [out] cfg I2C configuration.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_get_config(const qm_i2c_t i2c, qm_i2c_config_t *const cfg);
int qm_i2c_set_config(const qm_i2c_t i2c, const qm_i2c_config_t *const cfg);
/**
* Set I2C speed.
*
* Fine tune I2C clock speed. This will set the SCL low count
* and the SCL hi count cycles. To achieve any required speed.
* @brief Set I2C speed.
* @param [in] i2c I2C index.
* @param [in] speed Bus speed (Standard or Fast. Fast includes Fast+ mode)
* @param [in] lo_cnt SCL low count.
* @param [in] hi_cnt SCL high count.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] i2c I2C index.
* @param[in] speed Bus speed (Standard or Fast. Fast includes Fast+ mode).
* @param[in] lo_cnt SCL low count.
* @param[in] hi_cnt SCL high count.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_set_speed(const qm_i2c_t i2c, qm_i2c_speed_t speed,
uint16_t lo_cnt, uint16_t hi_cnt);
int qm_i2c_set_speed(const qm_i2c_t i2c, const qm_i2c_speed_t speed,
const uint16_t lo_cnt, const uint16_t hi_cnt);
/**
* Retrieve I2C status.
*
* @param [in] i2c Which I2C to read the status of.
 * @return qm_i2c_status_t Returns Free or Busy.
* @param[in] i2c Which I2C to read the status of.
* @param[out] status Get i2c status. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_i2c_status_t qm_i2c_get_status(const qm_i2c_t i2c);
int qm_i2c_get_status(const qm_i2c_t i2c, qm_i2c_status_t *const status);
/**
* Master write on I2C.
*
* Perform a master write on the I2C bus. This is a blocking synchronous call.
*
* @brief Master write on I2C.
* @param [in] i2c Which I2C to write to.
* @param [in] slave_addr Address of slave to write to.
* @param [in] data Pre-allocated buffer of data to write.
* @param [in] len length of data to write.
* @param [in] stop Generate a STOP condition at the end of Tx
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* @param[in] i2c Which I2C to write to.
* @param[in] slave_addr Address of slave to write to.
* @param[in] data Pre-allocated buffer of data to write. This must not be NULL.
* @param[in] len length of data to write.
* @param[in] stop Generate a STOP condition at the end of tx.
* @param[out] status Get i2c status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_master_write(const qm_i2c_t i2c, const uint16_t slave_addr,
const uint8_t *const data, uint32_t len, bool stop);
int qm_i2c_master_write(const qm_i2c_t i2c, const uint16_t slave_addr,
const uint8_t *const data, uint32_t len,
const bool stop, qm_i2c_status_t *const status);
/**
* Master read of I2C.
*
* Perform a single byte master read from the I2C. This is a blocking call.
*
* @brief Master read of I2C.
* @param [in] i2c Which I2C to read from.
* @param [in] slave_addr Address of slave device to read from.
* @param [out] data Pre-allocated buffer to populate with data.
* @param [in] len length of data to read from slave.
* @param [in] stop Generate a STOP condition at the end of Tx
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* @param[in] i2c Which I2C to read from.
* @param[in] slave_addr Address of slave device to read from.
* @param[out] data Pre-allocated buffer to populate with data. This must not be
* NULL.
* @param[in] len length of data to read from slave.
* @param[in] stop Generate a STOP condition at the end of tx.
* @param[out] status Get i2c status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
uint8_t *const data, uint32_t len, bool stop);
int qm_i2c_master_read(const qm_i2c_t i2c, const uint16_t slave_addr,
uint8_t *const data, uint32_t len,
const bool stop, qm_i2c_status_t *const status);
/**
* Interrupt based master transfer on I2C.
*
* Perform an interrupt based master transfer on the I2C bus. The function will
* replenish/empty TX/RX FIFOs on I2C empty/full interrupts.
*
* @brief Interrupt based master transfer on I2C.
* @param[in] i2c Which I2C to transfer from.
* @param[in] xfer Transfer structure includes write / read data and length,
* write, read and error callback functions and a callback
* identifier.
* @param [in] slave_addr Address of slave to transfer data with.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* @param[in] xfer Transfer structure includes write / read buffers, length,
* user callback function and the callback context. This must
* not be NULL.
* @param[in] slave_addr Address of slave to transfer data with.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
const qm_i2c_transfer_t *const xfer,
const uint16_t slave_addr);
int qm_i2c_master_irq_transfer(const qm_i2c_t i2c,
const qm_i2c_transfer_t *const xfer,
const uint16_t slave_addr);
/**
* Terminate I2C IRQ transfer.
*
* Terminate the current IRQ or DMA transfer on the I2C bus.
* This will cause the error callback to be called with status
* This will cause the user callback to be called with status
* QM_I2C_TX_ABRT_USER_ABRT.
*
* @brief Terminate I2C IRQ/DMA transfer.
* @param [in] i2c I2C register block pointer.
* @return qm_rc_t Returns QM_QM_RC_OK on success, error code otherwise.
* @param[in] i2c I2C register block pointer.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_i2c_transfer_terminate(const qm_i2c_t i2c);
int qm_i2c_irq_transfer_terminate(const qm_i2c_t i2c);
/**
* Configure a DMA channel with a specific transfer direction.
*
* Configure a DMA channel with a specific transfer direction. The user is
* responsible for managing the allocation of the pool of DMA channels provided
* by each DMA core to the different peripheral drivers that require them. Note
* that a I2C controller cannot use different DMA cores to manage transfers in
* different directions.
*
* This function configures DMA channel parameters that are unlikely to change
* between transfers, like transaction width, burst size, and handshake
* interface parameters. The user will likely only call this function once for
* the lifetime of an application unless the channel needs to be repurposed.
*
* Note that qm_dma_init() must first be called before configuring a channel.
*
* @param[in] i2c I2C controller identifier.
* @param[in] dma_ctrl_id DMA controller identifier.
* @param[in] dma_channel_id DMA channel identifier.
* @param[in] dma_channel_direction DMA channel direction, either
* QM_DMA_MEMORY_TO_PERIPHERAL (TX transfer) or QM_DMA_PERIPHERAL_TO_MEMORY
* (RX transfer).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_dma_channel_config(const qm_i2c_t i2c,
const qm_dma_t dma_controller_id,
const qm_dma_channel_id_t channel_id,
const qm_dma_channel_direction_t direction);
/**
* Perform a DMA-based transfer on the I2C bus.
*
* Perform a DMA-based transfer on the I2C bus. If the transfer is TX only, it
* will enable DMA operation for the controller and start the transfer.
*
 * If it's an RX only transfer, it will require 2 channels, one for writing the
* READ commands and another one for reading the bytes from the bus. Both DMA
* operations will start in parallel.
*
* If this is a combined transaction, both TX and RX operations will be set up,
* but only TX will be started. On TX finish (callback), the TX channel will be
* used for writing the READ commands and the RX operation will start.
*
* Note that qm_i2c_dma_channel_config() must first be called in order to
* configure all DMA channels needed for a transfer.
*
* @param[in] i2c I2C controller identifier.
* @param[in] xfer Structure containing pre-allocated write and read data
* buffers and callback functions. This pointer must be kept valid until the
* transfer is complete.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_master_dma_transfer(const qm_i2c_t i2c,
qm_i2c_transfer_t *const xfer,
const uint16_t slave_addr);
/**
 * Terminate any ongoing DMA transfer on the controller.
*
* Calls the DMA driver to stop any ongoing DMA transfer and calls
* qm_i2c_irq_transfer_terminate.
*
* @param[in] i2c Which I2C to terminate transfers from.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_i2c_dma_transfer_terminate(const qm_i2c_t i2c);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,7 +34,7 @@
#include "qm_soc_regs.h"
/**
* Initialisation and reset for Quark Microcontrollers.
* Initialisation and reset.
*
* @defgroup groupInit Initialisation
* @{
@ -44,12 +44,14 @@
* Reset Mode type.
*/
typedef enum {
QM_WARM_RESET = BIT(1),
QM_COLD_RESET = BIT(3),
QM_WARM_RESET = BIT(1), /**< Warm reset. */
QM_COLD_RESET = BIT(3), /**< Cold reset. */
} qm_soc_reset_t;
/**
* Reset the SoC. This can either be a cold reset or a warm reset.
* Reset the SoC.
*
* This can either be a cold reset or a warm reset.
*
* @param [in] reset_type Selects the type of reset to perform.
*/

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -37,17 +37,16 @@
* Linear mapping between IRQs and interrupt vectors
*/
#if (QUARK_SE)
#define QM_IRQ_TO_VECTOR(irq) (irq + 36)
#define QM_IRQ_TO_VECTOR(irq) (irq + 36) /**< Get the vector of an irq. */
#elif(QUARK_D2000)
#define QM_IRQ_TO_VECTOR(irq) (irq + 32)
#define QM_IRQ_TO_VECTOR(irq) (irq + 32) /**< Get the vector of an irq. */
#endif
/**
* Interrupt driver for Quark Microcontrollers.
* Interrupt driver.
*
* @brief Interrupt for QM.
* @defgroup groupINT Interrupt
* @{
*/
@ -55,7 +54,7 @@
/**
* Interrupt service routine type
*/
typedef void (*qm_isr_t)(void);
typedef void (*qm_isr_t)(struct interrupt_frame *frame);
/**
* Enable interrupt delivery for the SoC.
@ -70,46 +69,36 @@ void qm_irq_disable(void);
/**
* Unmask a given interrupt line.
*
* @param [in] irq Which IRQ to unmask.
* @param[in] irq Which IRQ to unmask.
*/
void qm_irq_unmask(uint32_t irq);
/**
* Mask a given interrupt line.
*
* @param [in] irq Which IRQ to mask.
* @param[in] irq Which IRQ to mask.
*/
void qm_irq_mask(uint32_t irq);
/**
* Register an Interrupt Service Routine to a given interrupt vector.
*
* @param [in] vector Interrupt Vector number.
* @param [in] isr ISR to register to given vector. Must be a valid x86 ISR.
* If this can't be provided, qm_irq_request() or
* qm_int_vector_request() should be used instead.
*/
void _qm_register_isr(uint32_t vector, qm_isr_t isr);
/**
* Setup an IRQ and its routing on the Interrupt Controller.
*
* @param [in] irq IRQ number. Must be of type QM_IRQ_XXX.
* @param [in] register_offset Interrupt Mask Register offset on SCSS.
* Must be of type QM_IRQ_XXX_MASK_OFFSET.
*/
void _qm_irq_setup(uint32_t irq, uint16_t register_offset);
/**
* Request a given IRQ and register Interrupt Service Routine to interrupt
* vector.
*
* @brief Request an IRQ and attach an ISR to it.
* @param [in] irq IRQ number. Must be of type QM_IRQ_XXX.
* @param [in] isr ISR to register to given IRQ.
* @param[in] irq IRQ number. Must be of type QM_IRQ_XXX.
* @param[in] isr ISR to register to given IRQ.
*/
#if (UNIT_TEST)
#define qm_irq_request(irq, isr)
#elif(QM_SENSOR)
#define qm_irq_request(irq, isr) \
do { \
_qm_register_isr(irq##_VECTOR, isr); \
_qm_irq_setup(irq, irq##_MASK_OFFSET); \
} while (0);
#else
#define qm_irq_request(irq, isr) \
do { \
@ -122,33 +111,29 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset);
/**
* Request an interrupt vector and register Interrupt Service Routine to it.
*
* @brief Request an interrupt vector and attach an ISR to it.
* @param [in] vector Vector number.
* @param [in] isr ISR to register to given IRQ.
* @param[in] vector Vector number.
* @param[in] isr ISR to register to given IRQ.
*/
#if (UNIT_TEST)
#define qm_int_vector_request(vector, isr)
#else
#if (__iamcu == 1)
/* Using the IAMCU calling convention */
#define qm_int_vector_request(vector, isr) \
do { \
__asm__ __volatile__("mov $1f, %%edx\n\t" \
"mov %0, %%eax\n\t" \
"call %P1\n\t" \
"jmp 2f\n\t" \
".align 4\n\t" \
"1:\n\t" \
" pushal\n\t" \
" call %P2\n\t" \
" popal\n\t" \
" iret\n\t" \
"2:\n\t" ::"g"(vector), \
"i"(_qm_register_isr), "i"(isr) \
: "%eax", "%ecx", "%edx"); \
} while (0)
#else
/* Using the standard SysV calling convention */
#if (__iamcu__)
/*
* We assume that if the compiler supports the IAMCU ABI it also
* supports the 'interrupt' attribute.
*/
static __inline__ void qm_int_vector_request(uint32_t vector, qm_isr_t isr)
{
_qm_register_isr(vector, isr);
}
#else /* __iamcu__ */
/*
* Using the standard SysV calling convention. A dummy (NULL in this case)
* parameter is added to ISR handler, to maintain consistency with the API
* imposed by the __attribute__((interrupt)) usage.
*/
#define qm_int_vector_request(vector, isr) \
do { \
__asm__ __volatile__("push $1f\n\t" \
@ -159,15 +144,16 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset);
".align 4\n\t" \
"1:\n\t" \
" pushal\n\t" \
" push $0x00\n\t" \
" call %P2\n\t" \
" add $4, %%esp\n\t" \
" popal\n\t" \
" iret\n\t" \
"2:\n\t" ::"g"(vector), \
"i"(_qm_register_isr), "i"(isr) \
: "%eax", "%ecx", "%edx"); \
} while (0)
#endif /* __iamcu == 1 */
#endif /* __iamcu__ */
#endif /* UNIT_TEST */
/**

View file

@ -0,0 +1,340 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_ISR_H__
#define __QM_ISR_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
/**
* Interrupt Service Routines.
*
* @defgroup groupISR ISR
* @{
*/
#if (QUARK_D2000)
/**
* ISR for ADC 0 convert and calibration interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_0, qm_adc_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_adc_0_isr);
/**
* ISR for ADC 0 change mode interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_ADC_PWR_0, qm_adc_pwr_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_adc_pwr_0_isr);
#endif /* QUARK_D2000 */
#if (QUARK_SE)
/**
* ISR for SS ADC 0 calibration interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_CAL, qm_ss_adc_0_cal_isr);
* @endcode if IRQ based calibration is used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_cal_isr);
/**
* ISR for SS ADC 0 mode change interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_SS_IRQ_ADC_PWR, qm_ss_adc_0_pwr_isr);
* @endcode if IRQ based mode change is used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr);
#endif /* QUARK_SE */
/**
* ISR for Always-on Periodic Timer 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AONPT_0, qm_aonpt_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_aonpt_isr_0);
/**
* ISR for Analog Comparator 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AC, qm_ac_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ac_isr);
/**
* ISR for DMA error interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_ERR, qm_dma_0_isr_err);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_err);
/**
* ISR for DMA channel 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_0, qm_dma_0_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_0);
/**
* ISR for DMA channel 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_1, qm_dma_0_isr_1);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_1);
#if (QUARK_SE)
/**
* ISR for DMA channel 2 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_2, qm_dma_0_isr_2);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_2);
/**
* ISR for DMA channel 3 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_3, qm_dma_0_isr_3);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_3);
/**
* ISR for DMA channel 4 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_4, qm_dma_0_isr_4);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_4);
/**
* ISR for DMA channel 5 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_5, qm_dma_0_isr_5);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_5);
/**
* ISR for DMA channel 6 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_6, qm_dma_0_isr_6);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_6);
/**
* ISR for DMA 0 channel 7 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_DMA_7, qm_dma_0_isr_7);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_dma_0_isr_7);
#endif /* QUARK_SE */
/**
* ISR for FPR 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_0, qm_fpr_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_fpr_isr_0);
/**
* ISR for FPR 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_FLASH_1, qm_fpr_isr_1);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_fpr_isr_1);
/**
* ISR for GPIO 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_GPIO_0, qm_gpio_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_gpio_isr_0);
#if (HAS_AON_GPIO)
/**
* ISR for AON GPIO 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_AONGPIO_0, qm_aon_gpio_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_aon_gpio_isr_0);
#endif /* HAS_AON_GPIO */
/**
* ISR for I2C 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_0, qm_i2c_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_0_isr);
/**
* ISR for I2C 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_I2C_1, qm_i2c_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_i2c_1_isr);
/**
* ISR for Mailbox interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_MBOX, qm_mbox_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_mbox_isr);
/**
* ISR for Memory Protection Region interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SRAM, qm_mpr_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_mpr_isr);
/**
* ISR for PIC Timer interrupt.
*
* On Quark Microcontroller D2000 Development Platform,
* this function needs to be registered with:
* @code qm_int_vector_request(QM_INT_VECTOR_PIC_TIMER, qm_pic_timer_isr);
* @endcode if IRQ based transfers are used.
*
* On Quark SE, this function needs to be registered with:
* @code qm_irq_request(QM_IRQ_PIC_TIMER, qm_pic_timer_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pic_timer_isr);
/**
* ISR for PWM 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_PWM_0, qm_pwm_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_pwm_isr_0);
/**
* ISR for RTC 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_RTC_0, qm_rtc_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_rtc_isr_0);
/**
* ISR for SPI Master 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_0, qm_spi_master_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_spi_master_0_isr);
#if (QUARK_SE)
/**
* ISR for SPI Master 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_SPI_MASTER_1, qm_spi_master_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_spi_master_1_isr);
#endif /* QUARK_SE */
/**
* ISR for UART 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_0, qm_uart_0_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_uart_0_isr);
/**
* ISR for UART 1 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_UART_1, qm_uart_1_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_uart_1_isr);
/**
* ISR for WDT 0 interrupt.
*
* This function needs to be registered with
* @code qm_irq_request(QM_IRQ_WDT_0, qm_wdt_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_wdt_isr_0);
/**
* @}
*/
#endif /* __QM_ISR_H__ */

View file

@ -0,0 +1,170 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_MAILBOX_H__
#define __QM_MAILBOX_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
#if (HAS_MAILBOX)
/**
* Mailbox driver.
*
* @defgroup groupMailbox Mailbox
* @{
*/
/**
 * Mailbox status. These values are tied to the HW bit settings and are made up
 * of bits 0 and 1 of the mailbox channel status register.
*/
typedef enum {
/**< No interrupt pending nor any data to consume. */
QM_MBOX_CH_IDLE = 0,
QM_MBOX_CH_DATA = BIT(0), /**< Message has not been consumed. */
QM_MBOX_CH_INT = BIT(1), /**< Channel interrupt pending. */
QM_MBOX_CH_STATUS_MASK = BIT(1) | BIT(0) /**< Status mask. */
} qm_mbox_ch_status_t;
/**
* Mailbox channel.
*/
typedef enum {
QM_MBOX_CH_0 = 0, /**< Channel 0. */
QM_MBOX_CH_1, /**< Channel 1. */
QM_MBOX_CH_2, /**< Channel 2. */
QM_MBOX_CH_3, /**< Channel 3. */
QM_MBOX_CH_4, /**< Channel 4. */
QM_MBOX_CH_5, /**< Channel 5. */
QM_MBOX_CH_6, /**< Channel 6. */
QM_MBOX_CH_7, /**< Channel 7. */
QM_MBOX_CH_NUM /**< Mailbox number of channels. */
} qm_mbox_ch_t;
/**
 * Mailbox message payload index values.
*/
typedef enum {
QM_MBOX_PAYLOAD_0 = 0, /**< Payload index value 0. */
QM_MBOX_PAYLOAD_1, /**< Payload index value 1. */
QM_MBOX_PAYLOAD_2, /**< Payload index value 2. */
QM_MBOX_PAYLOAD_3, /**< Payload index value 3. */
QM_MBOX_PAYLOAD_NUM, /**< Numbers of payloads. */
} qm_mbox_payload_t;
/**
* Definition of the mailbox message.
*/
typedef struct {
uint32_t ctrl; /**< Mailbox control element. */
uint32_t data[QM_MBOX_PAYLOAD_NUM]; /**< Mailbox data buffer. */
} qm_mbox_msg_t;
/**
* Definition of the mailbox callback function prototype.
* @param[in] data The callback user data.
*/
typedef void (*qm_mbox_callback_t)(void *data);
/**
* Set the mailbox channel configuration.
*
* Configure a IRQ callback, enables or disables IRQ for the chosen mailbox
* channel.
*
* @param[in] mbox_ch Mailbox to enable.
 * @param[in] mpr_cb Callback function to call on mailbox read (NULL for a
 * write to Mailbox).
 * @param[in] cb_data Callback function data to return via the callback
 * function. This must not be NULL.
* @param[in] irq_en Flag to enable/disable IRQ for this channel.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch, qm_mbox_callback_t mpr_cb,
void *cb_data, const bool irq_en);
/**
* Write to a specified mailbox channel.
*
* @param[in] mbox_ch Mailbox channel identifier.
* @param[in] msg Pointer to the data to write to the mailbox channel. This
* must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mbox_ch_write(const qm_mbox_ch_t mbox_ch,
const qm_mbox_msg_t *const msg);
/**
* Read specified mailbox channel.
*
* @param[in] mbox_ch mailbox channel identifier.
* @param[out] data pointer to the data to read from the mailbox channel. This
* must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const msg);
/**
* Retrieve the specified mailbox channel status.
*
* @param[in] mbox_ch Mailbox identifier to retrieve the status from.
* @param[out] status Mailbox status. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mbox_ch_get_status(const qm_mbox_ch_t mbox_ch,
qm_mbox_ch_status_t *const status);
/**
* Acknowledge the data arrival.
 * @param[in] mbox_ch Mailbox channel identifier to acknowledge.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_mbox_ch_data_ack(const qm_mbox_ch_t mbox_ch);
/**
* @}
*/
#endif /* HAS_MAILBOX */
#endif /* __QM_MAILBOX_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -53,7 +53,7 @@
/** MPR mask dma */
#define QM_SRAM_MPR_AGENT_MASK_DMA BIT(2)
typedef void (*qm_mpr_callback_t)(void);
typedef void (*qm_mpr_callback_t)(void *);
/* MPR identifier */
typedef enum {
@ -79,42 +79,25 @@ typedef enum {
MPR_VIOL_MODE_PROBE
} qm_mpr_viol_mode_t;
/**
* MPR Interrupt Service Routine
*/
void qm_mpr_isr(void);
/**
* Configure SRAM controller's Memory Protection Region.
*
* @param [in] id Which MPR to configure.
* @param [in] cfg MPR configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return int 0 on success, error code otherwise.
*/
qm_rc_t qm_mpr_set_config(const qm_mpr_id_t id,
const qm_mpr_config_t *const cfg);
/**
* Retrieve SRAM controller's Memory Protection Region configuration.
* This will set the cfg parameter to match the current configuration
* of the SRAM controller's MPR.
*
* @brief Get MPR configuration.
* @param [in] id Which MPR to get configuration of.
* @param [out] cfg MPR configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_mpr_get_config(const qm_mpr_id_t id, qm_mpr_config_t *const cfg);
int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg);
/**
* Configure MPR violation behaviour
*
* @param [in] mode (generate interrupt, warm reset, enter probe mode).
* @param [in] callback_fn for interrupt mode (only). This can not be null.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param [in] callback_fn for interrupt mode (only).
* @param [in] callback_data user data for interrupt mode (only).
* @return int 0 on success, error code otherwise.
* */
qm_rc_t qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn);
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn, void *data);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,74 +34,78 @@
#include "qm_soc_regs.h"
/**
* PIC timer for Quark Microcontrollers.
*
* @defgroup groupPICTimer PIC Timer
* @{
*/
* PIC timer.
*
* @defgroup groupPICTimer PIC Timer
* @{
*/
/**
* PIC timer mode type.
*/
typedef enum {
QM_PIC_TIMER_MODE_ONE_SHOT,
QM_PIC_TIMER_MODE_PERIODIC
QM_PIC_TIMER_MODE_ONE_SHOT, /**< One shot mode. */
QM_PIC_TIMER_MODE_PERIODIC /**< Periodic mode. */
} qm_pic_timer_mode_t;
/**
* PIC timer configuration type.
*/
* PIC timer configuration type.
*/
typedef struct {
qm_pic_timer_mode_t mode; /**< Operation mode */
bool int_en; /**< Interrupt enable */
void (*callback)(void); /**< Callback function */
qm_pic_timer_mode_t mode; /**< Operation mode. */
bool int_en; /**< Interrupt enable. */
/**
* User callback.
*
* @param[in] data User defined data.
*/
void (*callback)(void *data);
void *callback_data; /**< Callback user data. */
} qm_pic_timer_config_t;
/**
* PIC timer Interrupt Service Routine
* Set the PIC timer configuration.
*
* Set the PIC timer configuration.
* This includes timer mode and if interrupts are enabled. If interrupts are
* enabled, it will configure the callback function.
*
* @param[in] cfg PIC timer configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
void qm_pic_timer_isr(void);
int qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg);
/**
* Set the PIC timer configuration.
* This includes timer mode and if interrupts are enabled. If interrupts are
* enabled, it will configure the callback function.
*
* @brief Set the PIC timer configuration.
* @param [in] cfg PIC timer configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg);
* Set the current count value of the PIC timer.
*
* Set the current count value of the PIC timer.
* A value equal to 0 effectively stops the timer.
*
* @param[in] count Value to load the timer with.
*
* @return Standard errno return type for QMSI.
* @retval Always returns 0.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pic_timer_set(const uint32_t count);
/**
* Get PIC timer configuration.
* Populate the cfg parameter to match the current configuration of the PIC
* timer.
*
* @brief Get PIC timer configuration.
* @param[out] cfg PIC timer configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pic_timer_get_config(qm_pic_timer_config_t *const cfg);
* Get the current count value of the PIC timer.
*
* @param[out] count Pointer to the store the timer count.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_pic_timer_get(uint32_t *const count);
/**
* Set the current count value of the PIC timer.
* A value equal to 0 effectively stops the timer.
*
* @param [in] count Value to load the timer with.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pic_timer_set(const uint32_t count);
/**
* Get the current count value of the PIC timer.
*
* @return uint32_t Returns current PIC timer count value.
*/
uint32_t qm_pic_timer_get(void);
/**
* @}
*/
* @}
*/
#endif /* __QM_PIC_TIMER_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,7 +34,7 @@
#include "qm_soc_regs.h"
/**
* Pin muxing configuration for Quark Microcontrollers.
* Pin muxing configuration.
*
* @defgroup groupPinMux Pin Muxing setup
* @{
@ -44,10 +44,10 @@
* Pin function type.
*/
typedef enum {
QM_PMUX_FN_0,
QM_PMUX_FN_1,
QM_PMUX_FN_2,
QM_PMUX_FN_3,
QM_PMUX_FN_0, /**< Gpio function 0. */
QM_PMUX_FN_1, /**< Gpio function 0. */
QM_PMUX_FN_2, /**< Gpio function 0. */
QM_PMUX_FN_3, /**< Gpio function 0. */
} qm_pmux_fn_t;
/**
@ -55,89 +55,89 @@ typedef enum {
*/
typedef enum {
#if (QUARK_SE)
QM_PMUX_SLEW_2MA,
QM_PMUX_SLEW_4MA,
QM_PMUX_SLEW_2MA, /**< Set gpio slew rate to 2MA. */
QM_PMUX_SLEW_4MA, /**< Set gpio slew rate to 4MA. */
#else
QM_PMUX_SLEW_12MA,
QM_PMUX_SLEW_16MA,
QM_PMUX_SLEW_12MA, /**< Set gpio slew rate to 12MA. */
QM_PMUX_SLEW_16MA, /**< Set gpio slew rate to 16MA. */
#endif
QM_PMUX_SLEW_NUM
QM_PMUX_SLEW_NUM /**< Max number of slew rate options. */
} qm_pmux_slew_t;
/**
* External Pad pin identifiers
**/
typedef enum {
QM_PIN_ID_0,
QM_PIN_ID_1,
QM_PIN_ID_2,
QM_PIN_ID_3,
QM_PIN_ID_4,
QM_PIN_ID_5,
QM_PIN_ID_6,
QM_PIN_ID_7,
QM_PIN_ID_8,
QM_PIN_ID_9,
QM_PIN_ID_10,
QM_PIN_ID_11,
QM_PIN_ID_12,
QM_PIN_ID_13,
QM_PIN_ID_14,
QM_PIN_ID_15,
QM_PIN_ID_16,
QM_PIN_ID_17,
QM_PIN_ID_18,
QM_PIN_ID_19,
QM_PIN_ID_20,
QM_PIN_ID_21,
QM_PIN_ID_22,
QM_PIN_ID_23,
QM_PIN_ID_24,
QM_PIN_ID_0, /**< Pin id 0. */
QM_PIN_ID_1, /**< Pin id 1. */
QM_PIN_ID_2, /**< Pin id 2. */
QM_PIN_ID_3, /**< Pin id 3. */
QM_PIN_ID_4, /**< Pin id 4. */
QM_PIN_ID_5, /**< Pin id 5. */
QM_PIN_ID_6, /**< Pin id 6. */
QM_PIN_ID_7, /**< Pin id 7. */
QM_PIN_ID_8, /**< Pin id 8. */
QM_PIN_ID_9, /**< Pin id 9. */
QM_PIN_ID_10, /**< Pin id 10. */
QM_PIN_ID_11, /**< Pin id 11. */
QM_PIN_ID_12, /**< Pin id 12. */
QM_PIN_ID_13, /**< Pin id 13. */
QM_PIN_ID_14, /**< Pin id 14. */
QM_PIN_ID_15, /**< Pin id 15. */
QM_PIN_ID_16, /**< Pin id 16. */
QM_PIN_ID_17, /**< Pin id 17. */
QM_PIN_ID_18, /**< Pin id 18. */
QM_PIN_ID_19, /**< Pin id 19. */
QM_PIN_ID_20, /**< Pin id 20. */
QM_PIN_ID_21, /**< Pin id 21. */
QM_PIN_ID_22, /**< Pin id 22. */
QM_PIN_ID_23, /**< Pin id 23. */
QM_PIN_ID_24, /**< Pin id 24. */
#if (QUARK_SE)
QM_PIN_ID_25,
QM_PIN_ID_26,
QM_PIN_ID_27,
QM_PIN_ID_28,
QM_PIN_ID_29,
QM_PIN_ID_30,
QM_PIN_ID_31,
QM_PIN_ID_32,
QM_PIN_ID_33,
QM_PIN_ID_34,
QM_PIN_ID_35,
QM_PIN_ID_36,
QM_PIN_ID_37,
QM_PIN_ID_38,
QM_PIN_ID_39,
QM_PIN_ID_40,
QM_PIN_ID_41,
QM_PIN_ID_42,
QM_PIN_ID_43,
QM_PIN_ID_44,
QM_PIN_ID_45,
QM_PIN_ID_46,
QM_PIN_ID_47,
QM_PIN_ID_48,
QM_PIN_ID_49,
QM_PIN_ID_50,
QM_PIN_ID_51,
QM_PIN_ID_52,
QM_PIN_ID_53,
QM_PIN_ID_54,
QM_PIN_ID_55,
QM_PIN_ID_56,
QM_PIN_ID_57,
QM_PIN_ID_58,
QM_PIN_ID_59,
QM_PIN_ID_60,
QM_PIN_ID_61,
QM_PIN_ID_62,
QM_PIN_ID_63,
QM_PIN_ID_64,
QM_PIN_ID_65,
QM_PIN_ID_66,
QM_PIN_ID_67,
QM_PIN_ID_68,
QM_PIN_ID_25, /**< Pin id 25. */
QM_PIN_ID_26, /**< Pin id 26. */
QM_PIN_ID_27, /**< Pin id 27. */
QM_PIN_ID_28, /**< Pin id 28. */
QM_PIN_ID_29, /**< Pin id 29. */
QM_PIN_ID_30, /**< Pin id 30. */
QM_PIN_ID_31, /**< Pin id 31. */
QM_PIN_ID_32, /**< Pin id 32. */
QM_PIN_ID_33, /**< Pin id 33. */
QM_PIN_ID_34, /**< Pin id 34. */
QM_PIN_ID_35, /**< Pin id 35. */
QM_PIN_ID_36, /**< Pin id 36. */
QM_PIN_ID_37, /**< Pin id 37. */
QM_PIN_ID_38, /**< Pin id 38. */
QM_PIN_ID_39, /**< Pin id 39. */
QM_PIN_ID_40, /**< Pin id 40. */
QM_PIN_ID_41, /**< Pin id 41. */
QM_PIN_ID_42, /**< Pin id 42. */
QM_PIN_ID_43, /**< Pin id 43. */
QM_PIN_ID_44, /**< Pin id 44. */
QM_PIN_ID_45, /**< Pin id 45. */
QM_PIN_ID_46, /**< Pin id 46. */
QM_PIN_ID_47, /**< Pin id 47. */
QM_PIN_ID_48, /**< Pin id 48. */
QM_PIN_ID_49, /**< Pin id 49. */
QM_PIN_ID_50, /**< Pin id 50. */
QM_PIN_ID_51, /**< Pin id 51. */
QM_PIN_ID_52, /**< Pin id 52. */
QM_PIN_ID_53, /**< Pin id 53. */
QM_PIN_ID_54, /**< Pin id 54. */
QM_PIN_ID_55, /**< Pin id 55. */
QM_PIN_ID_56, /**< Pin id 56. */
QM_PIN_ID_57, /**< Pin id 57. */
QM_PIN_ID_58, /**< Pin id 58. */
QM_PIN_ID_59, /**< Pin id 59. */
QM_PIN_ID_60, /**< Pin id 60. */
QM_PIN_ID_61, /**< Pin id 61. */
QM_PIN_ID_62, /**< Pin id 62. */
QM_PIN_ID_63, /**< Pin id 63. */
QM_PIN_ID_64, /**< Pin id 64. */
QM_PIN_ID_65, /**< Pin id 65. */
QM_PIN_ID_66, /**< Pin id 66. */
QM_PIN_ID_67, /**< Pin id 67. */
QM_PIN_ID_68, /**< Pin id 68. */
#endif
QM_PIN_ID_NUM
} qm_pin_id_t;
@ -145,38 +145,50 @@ typedef enum {
/**
* Set up pin muxing for a SoC pin. Select one of the pin functions.
*
* @param [in] pin which pin to configure.
* @param [in] fn the function to assign to the pin.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pmux_select(qm_pin_id_t pin, qm_pmux_fn_t fn);
* @param[in] pin which pin to configure.
* @param[in] fn the function to assign to the pin.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pmux_select(const qm_pin_id_t pin, const qm_pmux_fn_t fn);
/**
* Set up pin's slew rate in the pin mux controller.
*
* @param [in] pin which pin to configure.
* @param [in] slew the slew rate to assign to the pin.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] pin which pin to configure.
* @param[in] slew the slew rate to assign to the pin.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_pmux_set_slew(qm_pin_id_t pin, qm_pmux_slew_t slew);
int qm_pmux_set_slew(const qm_pin_id_t pin, const qm_pmux_slew_t slew);
/**
* Enable input for a pin in the pin mux controller.
*
* @param [in] pin which pin to configure.
* @param [in] enable set to true to enable input.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] pin which pin to configure.
* @param[in] enable set to true to enable input.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_pmux_input_en(qm_pin_id_t pin, bool enable);
int qm_pmux_input_en(const qm_pin_id_t pin, const bool enable);
/**
* Enable pullup for a pin in the pin mux controller.
*
* @param [in] pin which pin to configure.
* @param [in] enable set to true to enable pullup.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] pin which pin to configure.
* @param[in] enable set to true to enable pullup.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_pmux_pullup_en(qm_pin_id_t pin, bool enable);
int qm_pmux_pullup_en(const qm_pin_id_t pin, const bool enable);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,7 +34,7 @@
#include "qm_soc_regs.h"
/**
* Pulse width modulation and Timer driver for Quark Microcontrollers.
* Pulse width modulation and Timer driver.
*
* @defgroup groupPWM PWM / Timer
* @{
@ -48,106 +48,116 @@
* PWM operating mode type.
*/
typedef enum {
QM_PWM_MODE_TIMER_FREE_RUNNING = 0,
QM_PWM_MODE_TIMER_COUNT = 2,
QM_PWM_MODE_PWM = 10
QM_PWM_MODE_TIMER_FREE_RUNNING = 0, /**< Timer: free runnig mode. */
QM_PWM_MODE_TIMER_COUNT = 2, /**< Timer: Counter mode. */
QM_PWM_MODE_PWM = 10 /**< Pwm mode. */
} qm_pwm_mode_t;
/**
* PWM / Timer configuration type.
*/
typedef struct {
uint32_t lo_count; /* Number of cycles the PWM output is driven low. In
timer mode, this is the timer load count. Must be
> 0. */
uint32_t hi_count; /* Number of cycles the PWM output is driven high.
Not applicable in timer mode. Must be > 0.*/
bool mask_interrupt;
qm_pwm_mode_t mode;
void (*callback)(uint32_t int_status);
uint32_t
lo_count; /**< Number of cycles the PWM output is driven low. In
timer mode, this is the timer load count. Must be
> 0. */
uint32_t hi_count; /**< Number of cycles the PWM output is driven high.
Not applicable in timer mode. Must be > 0.*/
bool mask_interrupt; /**< Mask interrupt. */
qm_pwm_mode_t mode; /**< Pwm mode. */
/**
* User callback.
*
* @param[in] data The callback user data.
* @param[in] int_status The timer status.
*/
void (*callback)(void *data, uint32_t int_status);
void *callback_data; /**< Callback user data. */
} qm_pwm_config_t;
/**
* PWM Interrupt Service Routine
*/
void qm_pwm_isr_0(void);
/**
* Change the configuration of a PWM channel. This includes low period load
* value, high period load value, interrupt enable/disable. If interrupts are
* enabled, registers an ISR with the given user callback function. When
* operating in PWM mode, 0% and 100% duty cycle is not available on Quark SE or
* Quark D2000. When setting the mode to PWM mode, hi_count must be > 0. In
* timer mode, the value of high count is ignored.
* Change the configuration of a PWM channel.
*
* @brief Set PWM channel configuration.
* @param [in] pwm Which PWM module to configure.
* @param [in] id PWM channel id to configure.
* @param [in] cfg New configuration for PWM.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
const qm_pwm_config_t *const cfg);
/**
* Get the current configuration of a PWM channel. This includes low
* period load value, high period load value, interrupt enable/disable.
*
* @brief Get PWM channel configuration.
* @param [in] pwm Which PWM module to get the configuration of.
* @param [in] id PWM channel id to get the configuration of.
* @param [out] cfg Current configuration for PWM.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_get_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
qm_pwm_config_t *const cfg);
/**
* Set the next period values of a PWM channel. This includes low
* period count and high period count. When operating in PWM mode, 0% and 100%
* duty cycle is not available on Quark SE or Quark D2000. When operating in PWM
* This includes low period load value, high period load value, interrupt
* enable/disable. If interrupts are enabled, registers an ISR with the given
* user callback function. When operating in PWM mode, 0% and 100% duty cycle
* is not available on Quark SE or Quark D2000. When setting the mode to PWM
* mode, hi_count must be > 0. In timer mode, the value of high count is
* ignored.
*
* @brief Set PWM channel configuration.
* @param[in] pwm Which PWM module to configure.
* @param[in] id PWM channel id to configure.
* @param[in] cfg New configuration for PWM. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
const qm_pwm_config_t *const cfg);
/**
* Set the next period values of a PWM channel.
*
* This includes low period count and high period count. When operating in PWM
* mode, 0% and 100% duty cycle is not available on Quark SE or Quark D2000.
* When operating in PWM mode, hi_count must be > 0. In timer mode, the value of
* high count is ignored.
*
* @brief Set PWM period counts.
* @param [in] pwm Which PWM module to set the counts of.
* @param [in] id PWM channel id to set.
* @param [in] lo_count Num of cycles the output is driven low.
* @param [in] hi_count Num of cycles the output is driven high.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_set(const qm_pwm_t pwm, const qm_pwm_id_t id,
const uint32_t lo_count, const uint32_t hi_count);
* @param[in] pwm Which PWM module to set the counts of.
* @param[in] id PWM channel id to set.
* @param[in] lo_count Num of cycles the output is driven low.
* @param[in] hi_count Num of cycles the output is driven high.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pwm_set(const qm_pwm_t pwm, const qm_pwm_id_t id,
const uint32_t lo_count, const uint32_t hi_count);
/**
* Get the current period values of a PWM channel.
*
* @param [in] pwm Which PWM module to get the count of.
* @param [in] id PWM channel id to read the values of.
* @param [out] lo_count Num of cycles the output is driven low.
* @param [out] hi_count Num of cycles the output is driven high.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
uint32_t *const lo_count, uint32_t *const hi_count);
* @param[in] pwm Which PWM module to get the count of.
* @param[in] id PWM channel id to read the values of.
* @param[out] lo_count Num of cycles the output is driven low. This must not be
* NULL.
* @param[out] hi_count Num of cycles the output is driven high. This must not
* be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
uint32_t *const lo_count, uint32_t *const hi_count);
/**
* Start a PWM/timer channel.
*
* @param [in] pwm Which PWM block the PWM is in.
* @param [in] id PWM channel id to start.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id);
* @param[in] pwm Which PWM block the PWM is in.
* @param[in] id PWM channel id to start.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id);
/**
* Stop a PWM/timer channel.
*
* @param [in] pwm Which PWM block the PWM is in.
* @param [in] id PWM channel id to stop.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id);
* @param[in] pwm Which PWM block the PWM is in.
* @param[in] id PWM channel id to stop.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
* */
int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -31,11 +31,11 @@
#define __QM_RTC_H__
#include "qm_common.h"
#include "qm_scss.h"
#include "qm_soc_regs.h"
#include "clk.h"
/**
* Real Time clock for Quark Microcontrollers.
* Real Time clock.
*
* @defgroup groupRTC RTC
* @{
@ -47,66 +47,57 @@
#define QM_RTC_CCR_INTERRUPT_MASK BIT(1)
#define QM_RTC_CCR_ENABLE BIT(2)
/** Number of RTC ticks in a second */
#define QM_RTC_ALARM_SECOND (32768 / BIT(QM_RTC_DIVIDER))
/** Number of RTC ticks in a minute */
#define QM_RTC_ALARM_MINUTE (QM_RTC_ALARM_SECOND * 60)
/** Number of RTC ticks in an hour */
#define QM_RTC_ALARM_HOUR (QM_RTC_ALARM_MINUTE * 60)
/** Number of RTC ticks in a day */
#define QM_RTC_ALARM_DAY (QM_RTC_ALARM_HOUR * 24)
/**
* RTC configuration type.
*/
typedef struct {
uint32_t init_val; /* Initial value in RTC clocks */
bool alarm_en; /* Alarm enable */
uint32_t alarm_val; /* Alarm value in RTC clocks */
void (*callback)(void); /* Callback function */
uint32_t init_val; /**< Initial value in RTC clocks. */
bool alarm_en; /**< Alarm enable. */
uint32_t alarm_val; /**< Alarm value in RTC clocks. */
/**
* User callback.
*
* @param[in] data User defined data.
*/
void (*callback)(void *data);
void *callback_data; /**< Callback user data. */
} qm_rtc_config_t;
/**
* RTC Interrupt Service Routine.
* Set RTC configuration.
*
* This includes the initial value in RTC clock periods, and the alarm value if
* an alarm is required. If the alarm is enabled, register an ISR with the user
* defined callback function.
*
* @param[in] rtc RTC index.
* @param[in] cfg New RTC configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
void qm_rtc_isr_0(void);
int qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg);
/**
* Set RTC module configuration. Including the initial
* value in RTC clock periods, and the alarm value if an alarm is
* required. If the alarm is enabled, register an ISR with the
* user defined callback function.
* Set Alarm value.
*
* @brief Set RTC configuration.
* @param [in] rtc RTC index.
* @param [in] cfg New RTC configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg);
/**
* Set a new RTC alarm value after an alarm, that has been set
* using the qm_rtc_set_config function, has expired and a new
* alarm value is required.
* Set a new RTC alarm value after an alarm, that has been set using the
* qm_rtc_set_config function, has expired and a new alarm value is required.
*
* @brief Set Alarm value.
* @param [in] rtc RTC index.
* @param [in] alarm_val Value to set alarm to.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val);
/**
* Get current configuration of RTC module. This includes the
* initial value in RTC clock periods, if an alarm is required
* and the tick value of the alarm in RTC clock periods.
* @param[in] rtc RTC index.
* @param[in] alarm_val Value to set alarm to.
*
* @brief Get RTC configuration.
* @param [in] rtc RTC index.
* @param [out] cfg New RTC configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_rtc_get_config(const qm_rtc_t rtc, qm_rtc_config_t *const cfg);
int qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val);
/**
* @}

View file

@ -1,288 +0,0 @@
/*
* Copyright (c) 2015, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SCSS_H__
#define __SCSS_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
#include <x86intrin.h>
/**
* System control subsystem for Quark Microcontrollers.
*
* @defgroup groupSCSS System Control Subsystem
* @{
*/
#define OSC0_SI_FREQ_SEL_MASK (0xFFFFFCFF)
/**
* When using an external crystal, this value must be set to the number of
* system ticks per micro second. The expected value is 32 ticks for a 32MHz
* crystal.
*/
#define SYS_TICKS_PER_US_XTAL (32)
#define SYS_TICKS_PER_US_32MHZ (32)
#define SYS_TICKS_PER_US_16MHZ (16)
#define SYS_TICKS_PER_US_8MHZ (8)
#define SYS_TICKS_PER_US_4MHZ (4)
/**
* System clock divider type.
*/
typedef enum {
CLK_SYS_DIV_1,
CLK_SYS_DIV_2,
CLK_SYS_DIV_4,
CLK_SYS_DIV_8,
#if (QUARK_D2000)
CLK_SYS_DIV_16,
CLK_SYS_DIV_32,
CLK_SYS_DIV_64,
CLK_SYS_DIV_128,
#endif
CLK_SYS_DIV_NUM
} clk_sys_div_t;
/**
* System clock mode type.
*/
typedef enum {
CLK_SYS_HYB_OSC_32MHZ,
CLK_SYS_HYB_OSC_16MHZ,
CLK_SYS_HYB_OSC_8MHZ,
CLK_SYS_HYB_OSC_4MHZ,
CLK_SYS_RTC_OSC,
CLK_SYS_CRYSTAL_OSC
} clk_sys_mode_t;
/**
* Peripheral clock divider type.
*/
typedef enum {
CLK_PERIPH_DIV_1,
CLK_PERIPH_DIV_2,
CLK_PERIPH_DIV_4,
CLK_PERIPH_DIV_8
} clk_periph_div_t;
/**
* GPIO clock debounce divider type.
*/
typedef enum {
CLK_GPIO_DB_DIV_1,
CLK_GPIO_DB_DIV_2,
CLK_GPIO_DB_DIV_4,
CLK_GPIO_DB_DIV_8,
CLK_GPIO_DB_DIV_16,
CLK_GPIO_DB_DIV_32,
CLK_GPIO_DB_DIV_64,
CLK_GPIO_DB_DIV_128
} clk_gpio_db_div_t;
/**
* External crystal clock divider type.
*/
typedef enum {
CLK_EXT_DIV_1,
CLK_EXT_DIV_2,
CLK_EXT_DIV_4,
CLK_EXT_DIV_8
} clk_ext_div_t;
/**
* RTC clock divider type.
*/
typedef enum {
CLK_RTC_DIV_1,
CLK_RTC_DIV_2,
CLK_RTC_DIV_4,
CLK_RTC_DIV_8,
CLK_RTC_DIV_16,
CLK_RTC_DIV_32,
CLK_RTC_DIV_64,
CLK_RTC_DIV_128,
CLK_RTC_DIV_256,
CLK_RTC_DIV_512,
CLK_RTC_DIV_1024,
CLK_RTC_DIV_2048,
CLK_RTC_DIV_4096,
CLK_RTC_DIV_8192,
CLK_RTC_DIV_16384,
CLK_RTC_DIV_32768
} clk_rtc_div_t;
/**
* SCSS peripheral clock register type.
*/
typedef enum {
CLK_PERIPH_REGISTER = BIT(0),
CLK_PERIPH_CLK = BIT(1),
CLK_PERIPH_I2C_M0 = BIT(2),
#if (QUARK_SE)
CLK_PERIPH_I2C_M1 = BIT(3),
#endif
CLK_PERIPH_SPI_S = BIT(4),
CLK_PERIPH_SPI_M0 = BIT(5),
#if (QUARK_SE)
CLK_PERIPH_SPI_M1 = BIT(6),
#endif
CLK_PERIPH_GPIO_INTERRUPT = BIT(7),
CLK_PERIPH_GPIO_DB = BIT(8),
#if (QUARK_SE)
CLK_PERIPH_I2S = BIT(9),
#endif
CLK_PERIPH_WDT_REGISTER = BIT(10),
CLK_PERIPH_RTC_REGISTER = BIT(11),
CLK_PERIPH_PWM_REGISTER = BIT(12),
CLK_PERIPH_GPIO_REGISTER = BIT(13),
CLK_PERIPH_SPI_M0_REGISTER = BIT(14),
#if (QUARK_SE)
CLK_PERIPH_SPI_M1_REGISTER = BIT(15),
#endif
CLK_PERIPH_SPI_S_REGISTER = BIT(16),
CLK_PERIPH_UARTA_REGISTER = BIT(17),
CLK_PERIPH_UARTB_REGISTER = BIT(18),
CLK_PERIPH_I2C_M0_REGISTER = BIT(19),
#if (QUARK_SE)
CLK_PERIPH_I2C_M1_REGISTER = BIT(20),
CLK_PERIPH_I2S_REGISTER = BIT(21),
CLK_PERIPH_ALL = 0x3FFFFF
#elif(QUARK_D2000)
CLK_PERIPH_ADC = BIT(22),
CLK_PERIPH_ADC_REGISTER = BIT(23),
CLK_PERIPH_ALL = 0xCFFFFF
#else
#error "Unsupported / unspecified interrupt controller detected."
#endif
} clk_periph_t;
/**
* Change the operating mode and clock divisor of the system
* clock source. Changing this clock speed affects all
* peripherals.
*
* @param [in] mode System clock source operating mode
* @param [in] div System clock divisor.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_sys_set_mode(const clk_sys_mode_t mode, const clk_sys_div_t div);
/**
* Change ADC clock divider value. The new divider value is set to N, where N is
* the value set by the function and is between 1 and 1024.
*
* @brief Change divider value of ADC clock.
* @param [in] div Divider value for the ADC clock.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_adc_set_div(const uint16_t div);
/**
* Change Peripheral clock divider value. The maximum divisor is
* /8. These peripherals include GPIO Interrupt, SPI, I2C and
* ADC.
*
* @brief Change divider value of peripheral clock.
* @param [in] div Divider value for the peripheral clock.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_periph_set_div(const clk_periph_div_t div);
/**
* Change GPIO debounce clock divider value. The maximum divisor
* is /128.
*
* @brief Change divider value of GPIO debounce clock.
* @param [in] div Divider value for the GPIO debounce clock.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_gpio_db_set_div(const clk_gpio_db_div_t div);
/**
* Change External clock divider value. The maximum divisor is
* /8.
*
* @brief Change divider value of external clock.
* @param [in] div Divider value for the external clock.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_ext_set_div(const clk_ext_div_t div);
/**
* Change RTC divider value. The maximum divisor is /32768.
*
* @brief Change divider value of RTC.
* @param [in] div Divider value for the RTC.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_rtc_set_div(const clk_rtc_div_t div);
/**
* Enable clocks for peripherals / registers.
*
* @param [in] clocks Which peripheral and register clocks to enable.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_periph_enable(const clk_periph_t clocks);
/**
* Disable clocks for peripherals / registers.
*
* @param [in] clocks Which peripheral and register clocks to disable.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t clk_periph_disable(const clk_periph_t clocks);
/**
* Get number of system ticks per micro second.
*
* @return uint32_t Number of system ticks per micro second.
*/
uint32_t clk_sys_get_ticks_per_us(void);
/**
* @brief Idle loop the processor for at least the value given in microseconds.
*
* This function will wait until at least the given number of microseconds has
* elapsed since calling this function. Note it is dependent on the system clock
* speed. The delay parameter does not include, calling the function, returning
* from it, calculation setup and while loops.
*
* @param [in] microseconds Minimum number of micro seconds to delay for.
* @return void.
*/
void clk_sys_udelay(uint32_t microseconds);
/**
* @}
*/
#endif /* __SCSS_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -32,6 +32,7 @@
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_dma.h"
/**
* SPI peripheral driver for Quark Microcontrollers.
@ -44,157 +45,177 @@
* QM SPI frame size type.
*/
typedef enum {
QM_SPI_FRAME_SIZE_4_BIT = 3, /* Min. size is 4 bits. */
QM_SPI_FRAME_SIZE_5_BIT,
QM_SPI_FRAME_SIZE_6_BIT,
QM_SPI_FRAME_SIZE_7_BIT,
QM_SPI_FRAME_SIZE_8_BIT,
QM_SPI_FRAME_SIZE_9_BIT,
QM_SPI_FRAME_SIZE_10_BIT,
QM_SPI_FRAME_SIZE_11_BIT,
QM_SPI_FRAME_SIZE_12_BIT,
QM_SPI_FRAME_SIZE_13_BIT,
QM_SPI_FRAME_SIZE_14_BIT,
QM_SPI_FRAME_SIZE_15_BIT,
QM_SPI_FRAME_SIZE_16_BIT,
QM_SPI_FRAME_SIZE_17_BIT,
QM_SPI_FRAME_SIZE_18_BIT,
QM_SPI_FRAME_SIZE_19_BIT,
QM_SPI_FRAME_SIZE_20_BIT,
QM_SPI_FRAME_SIZE_21_BIT,
QM_SPI_FRAME_SIZE_22_BIT,
QM_SPI_FRAME_SIZE_23_BIT,
QM_SPI_FRAME_SIZE_24_BIT,
QM_SPI_FRAME_SIZE_25_BIT,
QM_SPI_FRAME_SIZE_26_BIT,
QM_SPI_FRAME_SIZE_27_BIT,
QM_SPI_FRAME_SIZE_28_BIT,
QM_SPI_FRAME_SIZE_29_BIT,
QM_SPI_FRAME_SIZE_30_BIT,
QM_SPI_FRAME_SIZE_31_BIT,
QM_SPI_FRAME_SIZE_32_BIT
QM_SPI_FRAME_SIZE_4_BIT = 3, /**< 4 bit frame. */
QM_SPI_FRAME_SIZE_5_BIT, /**< 5 bit frame. */
QM_SPI_FRAME_SIZE_6_BIT, /**< 6 bit frame. */
QM_SPI_FRAME_SIZE_7_BIT, /**< 7 bit frame. */
QM_SPI_FRAME_SIZE_8_BIT, /**< 8 bit frame. */
QM_SPI_FRAME_SIZE_9_BIT, /**< 9 bit frame. */
QM_SPI_FRAME_SIZE_10_BIT, /**< 10 bit frame. */
QM_SPI_FRAME_SIZE_11_BIT, /**< 11 bit frame. */
QM_SPI_FRAME_SIZE_12_BIT, /**< 12 bit frame. */
QM_SPI_FRAME_SIZE_13_BIT, /**< 13 bit frame. */
QM_SPI_FRAME_SIZE_14_BIT, /**< 14 bit frame. */
QM_SPI_FRAME_SIZE_15_BIT, /**< 15 bit frame. */
QM_SPI_FRAME_SIZE_16_BIT, /**< 16 bit frame. */
QM_SPI_FRAME_SIZE_17_BIT, /**< 17 bit frame. */
QM_SPI_FRAME_SIZE_18_BIT, /**< 18 bit frame. */
QM_SPI_FRAME_SIZE_19_BIT, /**< 19 bit frame. */
QM_SPI_FRAME_SIZE_20_BIT, /**< 20 bit frame. */
QM_SPI_FRAME_SIZE_21_BIT, /**< 21 bit frame. */
QM_SPI_FRAME_SIZE_22_BIT, /**< 22 bit frame. */
QM_SPI_FRAME_SIZE_23_BIT, /**< 23 bit frame. */
QM_SPI_FRAME_SIZE_24_BIT, /**< 24 bit frame. */
QM_SPI_FRAME_SIZE_25_BIT, /**< 25 bit frame. */
QM_SPI_FRAME_SIZE_26_BIT, /**< 26 bit frame. */
QM_SPI_FRAME_SIZE_27_BIT, /**< 27 bit frame. */
QM_SPI_FRAME_SIZE_28_BIT, /**< 28 bit frame. */
QM_SPI_FRAME_SIZE_29_BIT, /**< 29 bit frame. */
QM_SPI_FRAME_SIZE_30_BIT, /**< 30 bit frame. */
QM_SPI_FRAME_SIZE_31_BIT, /**< 31 bit frame. */
QM_SPI_FRAME_SIZE_32_BIT /**< 32 bit frame. */
} qm_spi_frame_size_t;
/**
* SPI transfer mode type.
*/
typedef enum {
QM_SPI_TMOD_TX_RX, /**< Transmit & Receive */
QM_SPI_TMOD_TX, /**< Transmit Only */
QM_SPI_TMOD_RX, /**< Receive Only */
QM_SPI_TMOD_EEPROM_READ /**< EEPROM Read */
QM_SPI_TMOD_TX_RX, /**< Transmit & Receive. */
QM_SPI_TMOD_TX, /**< Transmit Only. */
QM_SPI_TMOD_RX, /**< Receive Only. */
QM_SPI_TMOD_EEPROM_READ /**< EEPROM Read. */
} qm_spi_tmode_t;
/**
* SPI bus mode type.
*/
typedef enum {
QM_SPI_BMODE_0, /**< Clock Polarity = 0, Clock Phase = 0 */
QM_SPI_BMODE_1, /**< Clock Polarity = 0, Clock Phase = 1 */
QM_SPI_BMODE_2, /**< Clock Polarity = 1, Clock Phase = 0 */
QM_SPI_BMODE_3 /**< Clock Polarity = 1, Clock Phase = 1 */
QM_SPI_BMODE_0, /**< Clock Polarity = 0, Clock Phase = 0. */
QM_SPI_BMODE_1, /**< Clock Polarity = 0, Clock Phase = 1. */
QM_SPI_BMODE_2, /**< Clock Polarity = 1, Clock Phase = 0. */
QM_SPI_BMODE_3 /**< Clock Polarity = 1, Clock Phase = 1. */
} qm_spi_bmode_t;
/**
* SPI slave select type.
*
* QM_SPI_SS_DISABLED prevents the controller from starting a transfer.
*/
typedef enum {
QM_SPI_SS_NONE = 0,
QM_SPI_SS_0 = BIT(0),
QM_SPI_SS_1 = BIT(1),
QM_SPI_SS_2 = BIT(2),
QM_SPI_SS_3 = BIT(3),
QM_SPI_SS_DISABLED = 0, /**< Slave select disable. */
QM_SPI_SS_0 = BIT(0), /**< Slave Select 0. */
QM_SPI_SS_1 = BIT(1), /**< Slave Select 1. */
QM_SPI_SS_2 = BIT(2), /**< Slave Select 2. */
QM_SPI_SS_3 = BIT(3), /**< Slave Select 3. */
} qm_spi_slave_select_t;
/**
* SPI status
*/
typedef enum {
QM_SPI_FREE,
QM_SPI_BUSY,
QM_SPI_TX_ERROR,
QM_SPI_EINVAL
QM_SPI_IDLE, /**< SPI device is not in use. */
QM_SPI_BUSY, /**< SPI device is busy. */
QM_SPI_RX_OVERFLOW /**< RX transfer has overflown. */
} qm_spi_status_t;
/**
* SPI configuration type.
*/
typedef struct {
qm_spi_frame_size_t frame_size; /**< Frame Size */
qm_spi_tmode_t transfer_mode; /**< Transfer mode (enum) */
qm_spi_bmode_t bus_mode; /**< Bus mode (enum) */
uint16_t clk_divider; /**< SCK = SPI_clock/clk_divider. A value of 0
will disable SCK. */
qm_spi_frame_size_t frame_size; /**< Frame Size. */
qm_spi_tmode_t transfer_mode; /**< Transfer mode (enum). */
qm_spi_bmode_t bus_mode; /**< Bus mode (enum). */
/**
* SCK = SPI_clock/clk_divider.
*
* A value of 0 will disable SCK.
*/
uint16_t clk_divider;
} qm_spi_config_t;
/**
* SPI IRQ transfer type.
*/
typedef struct {
uint8_t *tx; /* Write data */
uint32_t tx_len; /* Write data Length */
uint8_t *rx; /* Read data */
uint32_t rx_len; /* Read buffer length */
/* Write callback */
void (*tx_callback)(uint32_t id, uint32_t len);
/* Read callback */
void (*rx_callback)(uint32_t id, uint32_t len);
/* Error callback */
void (*err_callback)(uint32_t id, qm_rc_t status);
uint32_t id; /* Callback identifier */
uint8_t *tx; /**< Write data. */
uint16_t tx_len; /**< Write data Length. */
uint8_t *rx; /**< Read data. */
uint16_t rx_len; /**< Read buffer length. */
/**
* Transfer callback.
*
* Called after all data is transmitted/received or if the driver
* detects an error during the SPI transfer.
*
* @param[in] data The callback user data.
* @param[in] error 0 on success.
* Negative @ref errno for possible error codes.
* @param[in] status SPI driver status.
* @param[in] len Length of the SPI transfer if successful, 0
* otherwise.
*/
void (*callback)(void *data, int error, qm_spi_status_t status,
uint16_t len);
void *callback_data; /**< Callback user data. */
} qm_spi_async_transfer_t;
/**
* SPI transfer type.
*/
typedef struct {
uint8_t *tx; /* Write Data */
uint32_t tx_len; /* Write Data Length */
uint8_t *rx; /* Read Data */
uint32_t rx_len; /* Receive Data Length */
uint8_t *tx; /**< Write Data. */
uint16_t tx_len; /**< Write Data Length. */
uint8_t *rx; /**< Read Data. */
uint16_t rx_len; /**< Receive Data Length. */
} qm_spi_transfer_t;
/**
* Change the configuration of a SPI module. This includes transfer mode, bus
* mode and clock divider.
* Set SPI configuration.
*
* @brief Set SPI configuration.
* @param [in] spi Which SPI module to configure.
* @param [in] cfg New configuration for SPI.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_spi_set_config(const qm_spi_t spi,
const qm_spi_config_t *const cfg);
/**
* Get the current configuration of a SPI module. This includes transfer mode,
* bus mode and clock divider.
* Change the configuration of a SPI module.
* This includes transfer mode, bus mode and clock divider.
*
* @brief Get SPI configuration.
* @param [in] spi Which SPI module to read the configuration of.
* @param [in] cfg Current configuration of SPI.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] spi Which SPI module to configure.
* @param[in] cfg New configuration for SPI. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_spi_get_config(const qm_spi_t spi, qm_spi_config_t *const cfg);
int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *const cfg);
/**
* Select which slave to perform SPI transmissions on.
*
* @param [in] spi Which SPI module to configure.
* @param [in] ss Which slave select line to enable when doing transmissions.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] spi Which SPI module to configure.
* @param[in] ss Which slave select line to enable when doing transmissions.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_spi_slave_select(const qm_spi_t spi,
const qm_spi_slave_select_t ss);
int qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss);
/**
* Get SPI bus status.
*
* Retrieve SPI bus status. Return QM_SPI_BUSY if transmitting data or data Tx
* FIFO not empty.
*
* @brief Get SPI bus status.
* @param [in] spi Which SPI to read the status of.
* @return qm_spi_status_t Returns SPI specific return code.
* @param[in] spi Which SPI to read the status of.
* @param[out] status Get spi status. This must not be null.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_spi_status_t qm_spi_get_status(const qm_spi_t spi);
int qm_spi_get_status(const qm_spi_t spi, qm_spi_status_t *const status);
/**
* Multi-frame read / write on SPI.
*
* Perform a multi-frame read/write on the SPI bus. This is a blocking
* synchronous call. If the SPI is currently in use, the function will wait
* until the SPI is free before beginning the transfer. If transfer mode is
@ -202,53 +223,143 @@ qm_spi_status_t qm_spi_get_status(const qm_spi_t spi);
* Similarly, for transmit-only transfers (QM_SPI_TMOD_TX) rx_len must be 0,
* while for receive-only transfers (QM_SPI_TMOD_RX) tx_len must be 0.
*
* @brief Multi-frame read / write on SPI.
* @param [in] spi Which SPI to read/write on.
* @param [in] xfer Structure containing pre-allocated write and read data
* buffers.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* For starting a transfer, this controller demands at least one slave
* select line (SS) to be enabled. Thus, a call to qm_spi_slave_select()
* with one of the four SS valid lines is mandatory. This is true even if
* the native slave select line is not used (i.e. when a GPIO is used to
* drive the SS signal manually).
*
* @param[in] spi Which SPI to read/write on.
* @param[in] xfer Structure containing pre-allocated write and read data
* buffers. This must not be NULL.
* @param[out] status Get spi status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer);
int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
qm_spi_status_t *const status);
/**
* Interrupt based transfer on SPI.
*
* Perform an interrupt based transfer on the SPI bus. The function will
* replenish/empty TX/RX FIFOs on SPI empty/full interrupts. If transfer
* mode is full duplex (QM_SPI_TMOD_TX_RX), then tx_len and rx_len must be
* equal and both callbacks cannot be null. Similarly, for transmit-only
* transfers (QM_SPI_TMOD_TX) rx_len must be 0 and tx_callback cannot be null,
* while for receive-only transfers (QM_SPI_TMOD_RX) tx_len must be 0 and
* rx_callback cannot be null.
* equal. For transmit-only transfers (QM_SPI_TMOD_TX) rx_len must be 0
* while for receive-only transfers (QM_SPI_TMOD_RX) tx_len must be 0.
*
* @brief Interrupt based transfer on SPI.
* @param [in] spi Which SPI to transfer to / from.
* @param [in] xfer Transfer structure includes write / read data and length;
* write, read and error callback functions and a callback
* identifier. This pointer must be kept valid until the
* transfer is complete. The error callback cannot be null.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* For starting a transfer, this controller demands at least one slave
* select line (SS) to be enabled. Thus, a call to qm_spi_slave_select()
* with one of the four SS valid lines is mandatory. This is true even if
* the native slave select line is not used (i.e. when a GPIO is used to
* drive the SS signal manually).
*
* @param[in] spi Which SPI to transfer to / from.
* @param[in] xfer Transfer structure includes write / read buffers, length,
* user callback function and the callback context data.
* The structure must not be NULL and must be kept valid until
* the transfer is complete.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_spi_irq_transfer(const qm_spi_t spi,
qm_spi_async_transfer_t *const xfer);
int qm_spi_irq_transfer(const qm_spi_t spi,
const qm_spi_async_transfer_t *const xfer);
/**
* Interrupt service routine for the SPI masters
* Configure a DMA channel with a specific transfer direction.
*
* The user is responsible for managing the allocation of the pool of DMA
* channels provided by each DMA core to the different peripheral drivers
* that require them.
*
* Note that a SPI controller cannot use different DMA cores to manage
* transfers in different directions.
*
* This function configures DMA channel parameters that are unlikely to change
* between transfers, like transaction width, burst size, and handshake
* interface parameters. The user will likely only call this function once for
* the lifetime of an application unless the channel needs to be repurposed.
*
* Note that qm_dma_init() must first be called before configuring a channel.
*
* @param[in] spi SPI controller identifier.
* @param[in] dma_ctrl_id DMA controller identifier.
* @param[in] dma_channel_id DMA channel identifier.
* @param[in] dma_channel_direction DMA channel direction, either
* QM_DMA_MEMORY_TO_PERIPHERAL (TX transfer) or QM_DMA_PERIPHERAL_TO_MEMORY
* (RX transfer).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
void qm_spi_master_0_isr(void);
#if (QUARK_SE)
void qm_spi_master_1_isr(void);
#endif /* QUARK_SE */
int qm_spi_dma_channel_config(
const qm_spi_t spi, const qm_dma_t dma_ctrl_id,
const qm_dma_channel_id_t dma_channel_id,
const qm_dma_channel_direction_t dma_channel_direction);
/**
* Terminate the current IRQ or DMA transfer on the SPI bus.
* This will cause the relevant callbacks to be called.
* Perform a DMA-based transfer on the SPI bus.
*
* @brief Terminate SPI IRQ/DMA transfer.
* @param [in] spi Which SPI to cancel the current transfer.
* @return qm_rc_t Returns QM_RC_OK on success, error code otherwise.
* If transfer mode is full duplex (QM_SPI_TMOD_TX_RX), then tx_len and
* rx_len must be equal and neither of both callbacks can be NULL.
* Similarly, for transmit-only transfers (QM_SPI_TMOD_TX) rx_len must be 0
* and tx_callback cannot be NULL, while for receive-only transfers
* (QM_SPI_TMOD_RX) tx_len must be 0 and rx_callback cannot be NULL.
* Transfer length is limited to 4KB.
*
* For starting a transfer, this controller demands at least one slave
* select line (SS) to be enabled. Thus, a call to qm_spi_slave_select()
* with one of the four SS valid lines is mandatory. This is true even if
* the native slave select line is not used (i.e. when a GPIO is used to
* drive the SS signal manually).
*
* Note that qm_spi_dma_channel_config() must first be called in order to
* configure all DMA channels needed for a transfer.
*
* @param[in] spi SPI controller identifier.
* @param[in] xfer Structure containing pre-allocated write and read data
* buffers and callback functions. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_spi_transfer_terminate(const qm_spi_t spi);
int qm_spi_dma_transfer(const qm_spi_t spi,
const qm_spi_async_transfer_t *const xfer);
/**
* Terminate SPI IRQ transfer.
*
* Terminate the current IRQ transfer on the SPI bus.
* This will cause the user callback to be called with
* error code set to -ECANCELED.
*
* @param[in] spi Which SPI to cancel the current transfer.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_irq_transfer_terminate(const qm_spi_t spi);
/**
* Terminate the current DMA transfer on the SPI bus.
*
* Terminate the current DMA transfer on the SPI bus.
* This will cause the relevant callbacks to be invoked.
*
* @param[in] spi SPI controller identifier.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_spi_dma_transfer_terminate(const qm_spi_t spi);
/**
* @}
*/

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -32,283 +32,457 @@
#include "qm_common.h"
#include "qm_soc_regs.h"
#include "qm_dma.h"
/**
* UART driver for Quark Microcontrollers.
* UART peripheral driver.
*
* @defgroup groupUART UART
* @{
*/
/* Register fields */
/** Divisor Latch Access Bit. */
#define QM_UART_LCR_DLAB BIT(7)
/** Auto Flow Control Enable Bit. */
#define QM_UART_MCR_AFCE BIT(5)
/** Request to Send Bit. */
#define QM_UART_MCR_RTS BIT(1)
/** FIFO Enable Bit. */
#define QM_UART_FCR_FIFOE BIT(0)
/** Reset Receive FIFO. */
#define QM_UART_FCR_RFIFOR BIT(1)
/** Reset Transmit FIFO. */
#define QM_UART_FCR_XFIFOR BIT(2)
/** FIFO half RX, half TX Threshold. */
#define QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD (0xB0)
/** FIFO 1 byte RX, half TX Threshold. */
#define QM_UART_FCR_TX_1_2_RX_0_THRESHOLD (0x30)
/** FIFO half RX, empty TX Threshold. */
#define QM_UART_FCR_TX_0_RX_1_2_THRESHOLD (0x80)
/** Transmit Holding Register Empty. */
#define QM_UART_IIR_THR_EMPTY (0x02)
/** Received Data Available. */
#define QM_UART_IIR_RECV_DATA_AVAIL (0x04)
/** Receiver Line Status. */
#define QM_UART_IIR_RECV_LINE_STATUS (0x06)
/** Character Timeout. */
#define QM_UART_IIR_CHAR_TIMEOUT (0x0C)
/** Interrupt ID Mask. */
#define QM_UART_IIR_IID_MASK (0x0F)
/** Data Ready Bit. */
#define QM_UART_LSR_DR BIT(0)
/** Overflow Error Bit. */
#define QM_UART_LSR_OE BIT(1)
/** Parity Error Bit. */
#define QM_UART_LSR_PE BIT(2)
/** Framing Error Bit. */
#define QM_UART_LSR_FE BIT(3)
/** Break Interrupt Bit. */
#define QM_UART_LSR_BI BIT(4)
/** Transmit Holding Register Empty Bit. */
#define QM_UART_LSR_THRE BIT(5)
/** Transmitter Empty Bit. */
#define QM_UART_LSR_TEMT BIT(6)
/** Receiver FIFO Error Bit. */
#define QM_UART_LSR_RFE BIT(7)
/* Enable Transmit Holding Register Empty Interrupt*/
#define QM_UART_IER_ETBEI BIT(1)
/* Enable Received Data Available Interrupt */
/** Enable Received Data Available Interrupt. */
#define QM_UART_IER_ERBFI BIT(0)
/** Enable Transmit Holding Register Empty Interrupt. */
#define QM_UART_IER_ETBEI BIT(1)
/** Enable Receiver Line Status Interrupt. */
#define QM_UART_IER_ELSI BIT(2)
/** Programmable THRE Interrupt Mode. */
#define QM_UART_IER_PTIME BIT(7)
/** Line Status Errors. */
#define QM_UART_LSR_ERROR_BITS \
(QM_UART_LSR_OE | QM_UART_LSR_PE | QM_UART_LSR_FE | QM_UART_LSR_BI)
/** FIFO Depth. */
#define QM_UART_FIFO_DEPTH (16)
/** FIFO Half Depth. */
#define QM_UART_FIFO_HALF_DEPTH (QM_UART_FIFO_DEPTH / 2)
/** Divisor Latch High Offset. */
#define QM_UART_CFG_BAUD_DLH_OFFS 16
/** Divisor Latch Low Offset. */
#define QM_UART_CFG_BAUD_DLL_OFFS 8
/** Divisor Latch Fraction Offset. */
#define QM_UART_CFG_BAUD_DLF_OFFS 0
/** Divisor Latch High Mask. */
#define QM_UART_CFG_BAUD_DLH_MASK (0xFF << QM_UART_CFG_BAUD_DLH_OFFS)
/** Divisor Latch Low Mask. */
#define QM_UART_CFG_BAUD_DLL_MASK (0xFF << QM_UART_CFG_BAUD_DLL_OFFS)
/** Divisor Latch Fraction Mask. */
#define QM_UART_CFG_BAUD_DLF_MASK (0xFF << QM_UART_CFG_BAUD_DLF_OFFS)
/** Divisor Latch Packing Helper. */
#define QM_UART_CFG_BAUD_DL_PACK(dlh, dll, dlf) \
(dlh << QM_UART_CFG_BAUD_DLH_OFFS | dll << QM_UART_CFG_BAUD_DLL_OFFS | \
dlf << QM_UART_CFG_BAUD_DLF_OFFS)
/** Divisor Latch High Unpacking Helper. */
#define QM_UART_CFG_BAUD_DLH_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLH_MASK) >> QM_UART_CFG_BAUD_DLH_OFFS)
/** Divisor Latch Low Unpacking Helper. */
#define QM_UART_CFG_BAUD_DLL_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLL_MASK) >> QM_UART_CFG_BAUD_DLL_OFFS)
/** Divisor Latch Fraction Unpacking Helper. */
#define QM_UART_CFG_BAUD_DLF_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLF_MASK) >> QM_UART_CFG_BAUD_DLF_OFFS)
/**
* UART Line control.
*/
typedef enum {
QM_UART_LC_5N1 = 0x00, /**< 5 data bits, no parity, 1 stop bit */
QM_UART_LC_5N1_5 = 0x04, /**< 5 data bits, no parity, 1.5 stop bits */
QM_UART_LC_5E1 = 0x18, /**< 5 data bits, even parity, 1 stop bit */
QM_UART_LC_5E1_5 = 0x1c, /**< 5 data bits, even parity, 1.5 stop bits */
QM_UART_LC_5O1 = 0x08, /**< 5 data bits, odd parity, 1 stop bit */
QM_UART_LC_5O1_5 = 0x0c, /**< 5 data bits, odd parity, 1.5 stop bits */
QM_UART_LC_6N1 = 0x01, /**< 6 data bits, no parity, 1 stop bit */
QM_UART_LC_6N2 = 0x05, /**< 6 data bits, no parity, 2 stop bits */
QM_UART_LC_6E1 = 0x19, /**< 6 data bits, even parity, 1 stop bit */
QM_UART_LC_6E2 = 0x1d, /**< 6 data bits, even parity, 2 stop bits */
QM_UART_LC_6O1 = 0x09, /**< 6 data bits, odd parity, 1 stop bit */
QM_UART_LC_6O2 = 0x0d, /**< 6 data bits, odd parity, 2 stop bits */
QM_UART_LC_7N1 = 0x02, /**< 7 data bits, no parity, 1 stop bit */
QM_UART_LC_7N2 = 0x06, /**< 7 data bits, no parity, 2 stop bits */
QM_UART_LC_7E1 = 0x1a, /**< 7 data bits, even parity, 1 stop bit */
QM_UART_LC_7E2 = 0x1e, /**< 7 data bits, even parity, 2 stop bits */
QM_UART_LC_7O1 = 0x0a, /**< 7 data bits, odd parity, 1 stop bit */
QM_UART_LC_7O2 = 0x0e, /**< 7 data bits, odd parity, 2 stop bits */
QM_UART_LC_8N1 = 0x03, /**< 8 data bits, no parity, 1 stop bit */
QM_UART_LC_8N2 = 0x07, /**< 8 data bits, no parity, 2 stop bits */
QM_UART_LC_8E1 = 0x1b, /**< 8 data bits, even parity, 1 stop bit */
QM_UART_LC_8E2 = 0x1f, /**< 8 data bits, even parity, 2 stop bits */
QM_UART_LC_8O1 = 0x0b, /**< 8 data bits, odd parity, 1 stop bit */
QM_UART_LC_8O2 = 0x0f /**< 8 data bits, odd parity, 2 stop bits */
QM_UART_LC_5N1 = 0x00, /**< 5 data bits, no parity, 1 stop bit. */
QM_UART_LC_5N1_5 = 0x04, /**< 5 data bits, no parity, 1.5 stop bits. */
QM_UART_LC_5E1 = 0x18, /**< 5 data bits, even parity, 1 stop bit. */
QM_UART_LC_5E1_5 = 0x1c, /**< 5 data bits, even par., 1.5 stop bits. */
QM_UART_LC_5O1 = 0x08, /**< 5 data bits, odd parity, 1 stop bit. */
QM_UART_LC_5O1_5 = 0x0c, /**< 5 data bits, odd parity, 1.5 stop bits. */
QM_UART_LC_6N1 = 0x01, /**< 6 data bits, no parity, 1 stop bit. */
QM_UART_LC_6N2 = 0x05, /**< 6 data bits, no parity, 2 stop bits. */
QM_UART_LC_6E1 = 0x19, /**< 6 data bits, even parity, 1 stop bit. */
QM_UART_LC_6E2 = 0x1d, /**< 6 data bits, even parity, 2 stop bits. */
QM_UART_LC_6O1 = 0x09, /**< 6 data bits, odd parity, 1 stop bit. */
QM_UART_LC_6O2 = 0x0d, /**< 6 data bits, odd parity, 2 stop bits. */
QM_UART_LC_7N1 = 0x02, /**< 7 data bits, no parity, 1 stop bit. */
QM_UART_LC_7N2 = 0x06, /**< 7 data bits, no parity, 2 stop bits. */
QM_UART_LC_7E1 = 0x1a, /**< 7 data bits, even parity, 1 stop bit. */
QM_UART_LC_7E2 = 0x1e, /**< 7 data bits, even parity, 2 stop bits. */
QM_UART_LC_7O1 = 0x0a, /**< 7 data bits, odd parity, 1 stop bit. */
QM_UART_LC_7O2 = 0x0e, /**< 7 data bits, odd parity, 2 stop bits. */
QM_UART_LC_8N1 = 0x03, /**< 8 data bits, no parity, 1 stop bit. */
QM_UART_LC_8N2 = 0x07, /**< 8 data bits, no parity, 2 stop bits. */
QM_UART_LC_8E1 = 0x1b, /**< 8 data bits, even parity, 1 stop bit. */
QM_UART_LC_8E2 = 0x1f, /**< 8 data bits, even parity, 2 stop bits. */
QM_UART_LC_8O1 = 0x0b, /**< 8 data bits, odd parity, 1 stop bit. */
QM_UART_LC_8O2 = 0x0f /**< 8 data bits, odd parity, 2 stop bits. */
} qm_uart_lc_t;
/* Masks/offsets for baudrate divisors fields in config structure */
#define QM_UART_CFG_BAUD_DLH_OFFS 16
#define QM_UART_CFG_BAUD_DLL_OFFS 8
#define QM_UART_CFG_BAUD_DLF_OFFS 0
#define QM_UART_CFG_BAUD_DLH_MASK (0xFF << QM_UART_CFG_BAUD_DLH_OFFS)
#define QM_UART_CFG_BAUD_DLL_MASK (0xFF << QM_UART_CFG_BAUD_DLL_OFFS)
#define QM_UART_CFG_BAUD_DLF_MASK (0xFF << QM_UART_CFG_BAUD_DLF_OFFS)
/* Helpers for baudrate divisor packing/unpacking */
#define QM_UART_CFG_BAUD_DL_PACK(dlh, dll, dlf) \
(dlh << QM_UART_CFG_BAUD_DLH_OFFS | dll << QM_UART_CFG_BAUD_DLL_OFFS | \
dlf << QM_UART_CFG_BAUD_DLF_OFFS)
#define QM_UART_CFG_BAUD_DLH_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLH_MASK) >> QM_UART_CFG_BAUD_DLH_OFFS)
#define QM_UART_CFG_BAUD_DLL_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLL_MASK) >> QM_UART_CFG_BAUD_DLL_OFFS)
#define QM_UART_CFG_BAUD_DLF_UNPACK(packed) \
((packed & QM_UART_CFG_BAUD_DLF_MASK) >> QM_UART_CFG_BAUD_DLF_OFFS)
/**
* UART Status type.
*/
typedef enum {
QM_UART_OK = 0,
QM_UART_IDLE = 0,
QM_UART_RX_OE = BIT(1), /* Receiver overrun */
QM_UART_RX_PE = BIT(2), /* Parity error */
QM_UART_RX_FE = BIT(3), /* Framing error */
QM_UART_RX_BI = BIT(4), /* Break interrupt */
QM_UART_TX_BUSY = BIT(5),
QM_UART_RX_BUSY = BIT(6),
QM_UART_TX_NFULL = BIT(7), /* TX FIFO not full */
QM_UART_RX_NEMPTY = BIT(8), /* RX FIFO not empty */
QM_UART_EINVAL = BIT(31), /* Invalid input parameter */
QM_UART_IDLE = 0, /**< IDLE. */
QM_UART_RX_OE = BIT(1), /**< Receiver overrun. */
QM_UART_RX_PE = BIT(2), /**< Parity error. */
QM_UART_RX_FE = BIT(3), /**< Framing error. */
QM_UART_RX_BI = BIT(4), /**< Break interrupt. */
QM_UART_TX_BUSY = BIT(5), /**< TX Busy flag. */
QM_UART_RX_BUSY = BIT(6), /**< RX Busy flag. */
QM_UART_TX_NFULL = BIT(7), /**< TX FIFO not full. */
QM_UART_RX_NEMPTY = BIT(8), /**< RX FIFO not empty. */
} qm_uart_status_t;
/**
* UART configuration type.
*/
typedef struct {
qm_uart_lc_t line_control;
uint32_t baud_divisor;
bool hw_fc;
bool int_en;
qm_uart_lc_t line_control; /**< Line control (enum). */
uint32_t baud_divisor; /**< Baud Divisor. */
bool hw_fc; /**< Hardware Automatic Flow Control. */
bool int_en; /**< Interrupt enable. */
} qm_uart_config_t;
/**
* UART IRQ transfer structure, holds pre-allocated write and read buffers.
* Also pointers to user defined callbacks for write, read and errors.
* UART asynchronous transfer structure.
*/
typedef struct {
uint8_t *data;
uint32_t data_len;
void (*fin_callback)(uint32_t id, uint32_t len);
void (*err_callback)(uint32_t id, qm_uart_status_t status);
uint32_t id;
uint8_t *data; /**< Pre-allocated write or read buffer. */
uint32_t data_len; /**< Number of bytes to transfer. */
/** Transfer callback
*
* @param[in] data Callback user data.
* @param[in] error 0 on success.
* Negative @ref errno for possible error codes.
* @param[in] status UART module status
* @param[in] len Length of the UART transfer if successful, 0
* otherwise.
*/
void (*callback)(void *data, int error, qm_uart_status_t status,
uint32_t len);
void *callback_data; /**< Callback identifier. */
} qm_uart_transfer_t;
/**
* UART 0 Interrupt Service Routine.
*/
void qm_uart_0_isr(void);
/**
* UART 1 Interrupt Service Routine.
*/
void qm_uart_1_isr(void);
/**
* Set UART configuration.
*
* Change the configuration of a UART module. This includes line control,
* baud rate and hardware flow control.
*
* @brief Set UART confguration.
* @param[in] uart Which UART module to configure.
* @param[in] cfg New configuration for UART.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_uart_set_config(const qm_uart_t uart,
const qm_uart_config_t *const cfg);
/**
* Read the current configuration of a UART module. This includes line
* control, baud rate and hardware flow control.
* @param[in] cfg New configuration for UART. This must not be NULL.
*
* @brief Get UART confguration.
* @param[in] uart Which UART module to read the configuration of.
* @param[in] cfg Current configuration for UART.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_get_config(const qm_uart_t uart, qm_uart_config_t *const cfg);
int qm_uart_set_config(const qm_uart_t uart, const qm_uart_config_t *const cfg);
/**
* Get UART bus status.
*
* Retrieve UART interface status. Return QM_UART_BUSY if transmitting
* data; QM_UART_IDLE if available for transfer QM_UART_TX_ERROR if an
* error has occurred in transmission.
*
* @brief Get UART bus status.
* @param[in] uart Which UART to read the status of.
* @return qm_uart_status_t Returns UART specific return code.
* @param[out] status UART specific status. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_uart_status_t qm_uart_get_status(const qm_uart_t uart);
int qm_uart_get_status(const qm_uart_t uart, qm_uart_status_t *const status);
/**
* UART character data write.
*
* Perform a single character write on the UART interface.
* This is a blocking synchronous call.
*
* @brief UART character data write.
* @param [in] uart UART index.
* @param [in] data Data to write to UART.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] uart UART index.
* @param[in] data Data to write to UART.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_write(const qm_uart_t uart, const uint8_t data);
int qm_uart_write(const qm_uart_t uart, const uint8_t data);
/**
* UART character data read.
*
* Perform a single character read from the UART interface.
* This is a blocking synchronous call.
*
* @brief UART character data read.
* @param [in] uart UART index.
* @param [out] data Data to read from UART.
* @return qm_uart_status_t Returns UART specific return code.
* @param[in] uart UART index.
* @param[out] data Data to read from UART. This must not be NULL.
* @param[out] status UART specific status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_uart_status_t qm_uart_read(const qm_uart_t uart, uint8_t *data);
int qm_uart_read(const qm_uart_t uart, uint8_t *const data,
qm_uart_status_t *const status);
/**
* UART character data write.
*
* Perform a single character write on the UART interface.
* This is a non-blocking synchronous call.
*
* @brief UART character data write.
* @param [in] uart UART index.
* @param [in] data Data to write to UART.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] uart UART index.
* @param[in] data Data to write to UART.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_write_non_block(const qm_uart_t uart, const uint8_t data);
int qm_uart_write_non_block(const qm_uart_t uart, const uint8_t data);
/**
* UART character data read.
*
* Perform a single character read from the UART interface.
* This is a non-blocking synchronous call.
*
* @brief UART character data read.
* @param [in] uart UART index.
* @return uint8_t Character read.
* @param[in] uart UART index.
* @param[out] data Character read. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
uint8_t qm_uart_read_non_block(const qm_uart_t uart);
int qm_uart_read_non_block(const qm_uart_t uart, uint8_t *const data);
/**
* UART multi-byte data write.
*
* Perform a write on the UART interface. This is a blocking
* synchronous call. The function will block until all data has
* been transferred.
*
* @brief UART multi-byte data write.
* @param [in] uart UART index.
* @param [in] data Data to write to UART.
* @param [in] len Length of data to write to UART.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] uart UART index.
* @param[in] data Data to write to UART. This must not be NULL.
* @param[in] len Length of data to write to UART.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_write_buffer(const qm_uart_t uart, const uint8_t *const data,
uint32_t len);
int qm_uart_write_buffer(const qm_uart_t uart, const uint8_t *const data,
uint32_t len);
/**
* Interrupt based TX on UART.
*
* Perform an interrupt based TX transfer on the UART bus. The function
* will replenish the TX FIFOs on UART empty interrupts.
*
* @brief Interrupt based TX on UART.
* @param [in] uart UART index.
* @param [in] xfer Structure containing pre-allocated write buffer and callback
* functions. The callbacks cannot be null.
* @return qm_uart_status_t Returns UART specific return code.
* @param[in] uart UART index.
* @param[in] xfer Structure containing pre-allocated
* write buffer and callback functions.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_uart_status_t qm_uart_irq_write(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
int qm_uart_irq_write(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
/**
* Interrupt based RX on UART.
*
* Perform an interrupt based RX transfer on the UART bus. The function
* will read back the RX FIFOs on UART empty interrupts.
*
* @brief Interrupt based RX on UART.
* @param [in] uart UART register block pointer.
* @param [in] xfer Structure containing pre-allocated read
* buffer and callback functions. The callbacks cannot be null.
* @return qm_uart_status_t Returns UART specific return code.
* @param[in] uart UART index.
* @param[in] xfer Structure containing pre-allocated read
* buffer and callback functions.
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_uart_status_t qm_uart_irq_read(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
int qm_uart_irq_read(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
/**
* Terminate the current IRQ or DMA TX transfer on the UART bus.
* Terminate UART IRQ TX transfer.
*
* Terminate the current IRQ TX transfer on the UART bus.
* This will cause the relevant callbacks to be called.
*
* @brief Terminate UART IRQ/DMA TX transfer.
* @param [in] uart UART index.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] uart UART index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_write_terminate(const qm_uart_t uart);
int qm_uart_irq_write_terminate(const qm_uart_t uart);
/**
* Terminate the current IRQ or DMA RX transfer on the UART bus.
* Terminate UART IRQ RX transfer.
*
* Terminate the current IRQ RX transfer on the UART bus.
* This will cause the relevant callbacks to be called.
*
* @brief Terminate UART IRQ/DMA RX transfer.
* @param [in] uart UART index.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @param[in] uart UART index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_uart_read_terminate(const qm_uart_t uart);
int qm_uart_irq_read_terminate(const qm_uart_t uart);
/**
* Configure a DMA channel with a specific transfer direction.
*
* The user is responsible for managing the allocation of the pool
* of DMA channels provided by each DMA core to the different
* peripheral drivers that require them.
*
* This function configures DMA channel parameters that are unlikely to change
* between transfers, like transaction width, burst size, and handshake
* interface parameters. The user will likely only call this function once for
* the lifetime of an application unless the channel needs to be repurposed.
*
* Note that qm_dma_init() must first be called before configuring a channel.
*
* @param[in] uart UART index.
* @param[in] dma_ctrl_id DMA controller identifier.
* @param[in] dma_channel_id DMA channel identifier.
* @param[in] dma_channel_direction DMA channel direction, either
* QM_DMA_MEMORY_TO_PERIPHERAL (write transfer) or QM_DMA_PERIPHERAL_TO_MEMORY
* (read transfer).
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_dma_channel_config(
const qm_uart_t uart, const qm_dma_t dma_ctrl_id,
const qm_dma_channel_id_t dma_channel_id,
const qm_dma_channel_direction_t dma_channel_direction);
/**
* Perform a DMA-based TX transfer on the UART bus.
*
* In order for this call to succeed, previously the user
* must have configured a DMA channel with direction
* QM_DMA_MEMORY_TO_PERIPHERAL to be used on this UART, calling
* qm_uart_dma_channel_config(). The transfer length is limited to 4KB.
*
* @param[in] uart UART index.
* @param[in] xfer Structure containing a pre-allocated write buffer
* and callback functions.
* This must not be NULL.
* Callback pointer must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_dma_write(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
/**
* Perform a DMA-based RX transfer on the UART bus.
*
* In order for this call to succeed, previously the user
* must have configured a DMA channel with direction
* QM_DMA_PERIPHERAL_TO_MEMORY to be used on this UART, calling
* qm_uart_dma_channel_config(). The transfer length is limited to 4KB.
*
* @param[in] uart UART index.
* @param[in] xfer Structure containing a pre-allocated read buffer
* and callback functions.
* This must not be NULL.
* Callback pointer must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_dma_read(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer);
/**
* Terminate the current DMA TX transfer on the UART bus.
*
* This will cause the relevant callbacks to be called.
*
* @param[in] uart UART index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_dma_write_terminate(const qm_uart_t uart);
/**
* Terminate the current DMA RX transfer on the UART bus.
*
* This will cause the relevant callbacks to be called.
*
* @param[in] uart UART index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_uart_dma_read_terminate(const qm_uart_t uart);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -31,6 +31,7 @@
#define __QM_VERSION_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
/**
* Version number functions for API.
@ -39,21 +40,6 @@
* @{
*/
/**
* QM API major version number
*/
#define QM_VER_API_MAJOR 1
/**
* QM API minor version number
*/
#define QM_VER_API_MINOR 0
/**
* QM API patch version number
*/
#define QM_VER_API_PATCH 1
/**
* Create a single version number from the major, minor and patch numbers
*/
@ -67,6 +53,15 @@
#define QM_VER_API_STRING \
QM_VER_STRINGIFY(QM_VER_API_MAJOR, QM_VER_API_MINOR, QM_VER_API_PATCH)
/**
* Get the ROM version number.
*
* Reads the ROM version information from flash and returns it.
*
* @return uint32_t ROM version.
*/
uint32_t qm_ver_rom(void);
/**
* @}
*/

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -34,95 +34,123 @@
#include "qm_soc_regs.h"
/**
* Watchdog timer for Quark Microcontrollers.
* Watchdog timer.
*
* @defgroup groupWDT WDT
* @{
*/
/** Watchdog enable. */
#define QM_WDT_ENABLE (BIT(0))
/** Watchdog mode. */
#define QM_WDT_MODE (BIT(1))
/** Watchdog mode offset. */
#define QM_WDT_MODE_OFFSET (1)
/** Watchdog Timeout Mask. */
#define QM_WDT_TIMEOUT_MASK (0xF)
/**
* WDT Mode type.
*/
typedef enum { QM_WDT_MODE_RESET, QM_WDT_MODE_INTERRUPT_RESET } qm_wdt_mode_t;
typedef enum {
/** Watchdog Reset Response Mode.
*
* The watchdog will request a SoC Warm Reset on a timeout.
*/
QM_WDT_MODE_RESET,
/** Watchdog Interrupt Reset Response Mode.
*
* The watchdog will generate an interrupt on first timeout.
* If interrupt has not been cleared by the second timeout
* the watchdog will then request a SoC Warm Reset.
*/
QM_WDT_MODE_INTERRUPT_RESET
} qm_wdt_mode_t;
/**
* WDT clock cycles for timeout type. This value is a power of 2.
*/
typedef enum {
QM_WDT_2_POW_16_CYCLES,
QM_WDT_2_POW_17_CYCLES,
QM_WDT_2_POW_18_CYCLES,
QM_WDT_2_POW_19_CYCLES,
QM_WDT_2_POW_20_CYCLES,
QM_WDT_2_POW_21_CYCLES,
QM_WDT_2_POW_22_CYCLES,
QM_WDT_2_POW_23_CYCLES,
QM_WDT_2_POW_24_CYCLES,
QM_WDT_2_POW_25_CYCLES,
QM_WDT_2_POW_26_CYCLES,
QM_WDT_2_POW_27_CYCLES,
QM_WDT_2_POW_28_CYCLES,
QM_WDT_2_POW_29_CYCLES,
QM_WDT_2_POW_30_CYCLES,
QM_WDT_2_POW_31_CYCLES
QM_WDT_2_POW_16_CYCLES, /**< 2^16 clock cycles timeout. */
QM_WDT_2_POW_17_CYCLES, /**< 2^17 clock cycles timeout. */
QM_WDT_2_POW_18_CYCLES, /**< 2^18 clock cycles timeout. */
QM_WDT_2_POW_19_CYCLES, /**< 2^19 clock cycles timeout. */
QM_WDT_2_POW_20_CYCLES, /**< 2^20 clock cycles timeout. */
QM_WDT_2_POW_21_CYCLES, /**< 2^21 clock cycles timeout. */
QM_WDT_2_POW_22_CYCLES, /**< 2^22 clock cycles timeout. */
QM_WDT_2_POW_23_CYCLES, /**< 2^23 clock cycles timeout. */
QM_WDT_2_POW_24_CYCLES, /**< 2^24 clock cycles timeout. */
QM_WDT_2_POW_25_CYCLES, /**< 2^25 clock cycles timeout. */
QM_WDT_2_POW_26_CYCLES, /**< 2^26 clock cycles timeout. */
QM_WDT_2_POW_27_CYCLES, /**< 2^27 clock cycles timeout. */
QM_WDT_2_POW_28_CYCLES, /**< 2^28 clock cycles timeout. */
QM_WDT_2_POW_29_CYCLES, /**< 2^29 clock cycles timeout. */
QM_WDT_2_POW_30_CYCLES, /**< 2^30 clock cycles timeout. */
QM_WDT_2_POW_31_CYCLES, /**< 2^31 clock cycles timeout. */
QM_WDT_2_POW_CYCLES_NUM
} qm_wdt_clock_timeout_cycles_t;
/**
* QM WDT configuration type.
*/
typedef struct {
qm_wdt_clock_timeout_cycles_t timeout;
qm_wdt_mode_t mode;
void (*callback)(void);
qm_wdt_clock_timeout_cycles_t timeout; /**< Timeout in cycles. */
qm_wdt_mode_t mode; /**< Watchdog response mode. */
/**
* User callback.
*
* @param[in] data Callback user data.
*/
void (*callback)(void *data);
void *callback_data; /**< Callback user data. */
} qm_wdt_config_t;
/**
* Start WDT. Once started, WDT can only be stopped by a SoC reset.
* Start WDT.
*
* @param [in] wdt WDT index.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Once started, WDT can only be stopped by a SoC reset.
*
* @param[in] wdt WDT index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_wdt_start(const qm_wdt_t wdt);
int qm_wdt_start(const qm_wdt_t wdt);
/**
* Set configuration of WDT module. This includes the timeout period in PCLK
* cycles, the WDT mode of operation. It also registers an ISR to the user
* defined callback.
* Set configuration of WDT module.
*
* @param [in] wdt WDT index.
* This includes the timeout period in PCLK cycles, the WDT mode of operation.
* It also registers an ISR to the user defined callback.
*
* @param[in] wdt WDT index.
* @param[in] cfg New configuration for WDT.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
*/
qm_rc_t qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg);
/**
* Get the current configuration of WDT module. This includes the
* timeout period in PCLK cycles, the WDT mode of operation.
* This must not be NULL.
* If QM_WDT_MODE_INTERRUPT_RESET mode is set,
* the 'callback' cannot be null.
*
* @param [in] wdt WDT index.
* @param[out] cfg Parameter to be set with the current WDT configuration.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_wdt_get_config(const qm_wdt_t wdt, qm_wdt_config_t *const cfg);
int qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg);
/**
* Reload the WDT counter with safety value, i.e. service the watchdog
* Reload the WDT counter.
*
* @param [in] wdt WDT index.
* @return qm_rc_t QM_RC_OK on success, error code otherwise.
* Reload the WDT counter with safety value, i.e. service the watchdog.
*
* @param[in] wdt WDT index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t qm_wdt_reload(const qm_wdt_t wdt);
/**
* WDT Interrupt Service Routine
*/
void qm_wdt_isr_0(void);
int qm_wdt_reload(const qm_wdt_t wdt);
/**
* @}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,8 +27,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_RAR_H__
#define __QM_RAR_H__
#ifndef __RAR_H__
#define __RAR_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
@ -45,23 +45,28 @@
* RAR modes type.
*/
typedef enum {
RAR_NORMAL, /**< Normal mode = 50 mA */
RAR_RETENTION /**< Retention mode = 300 uA */
RAR_NORMAL, /**< Normal mode = 50 mA. */
RAR_RETENTION /**< Retention mode = 300 uA. */
} rar_state_t;
/**
* Change operating mode of the RAR. Normal mode is able to source up to 50 mA.
* Retention mode is able to source up to 300 uA. Care must be taken when
* entering into retention mode to ensure the overall system draw is less than
* 300 uA.
* Change operating mode of RAR.
*
* @brief change operating mode of RAR
* @param [in] mode Operating mode of the RAR
* Normal mode is able to source up to 50 mA.
* Retention mode is able to source up to 300 uA.
* Care must be taken when entering into retention mode
* to ensure the overall system draw is less than 300 uA.
*
* @param[in] mode Operating mode of the RAR.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
qm_rc_t rar_set_mode(rar_state_t mode);
int rar_set_mode(const rar_state_t mode);
/**
* @}
*/
#endif /* HAS_RAR */
#endif /* __QM_RAR_H__ */
#endif /* __RAR_H__ */

130
drivers/qmsi/drivers/mvic.h Normal file
View file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __MVIC_H__
#define __MVIC_H__
#include <stdint.h>
#include "qm_common.h"
#include "qm_soc_regs.h"
#define NUM_IRQ_LINES (32)
/**
 * Read the redirection table entry for an IRQ line.
 *
 * @param irq IRQ line to read (0 .. NUM_IRQ_LINES - 1).
 * @return Low 32 bits of the redirection entry for that line.
 */
static __inline__ uint32_t _mvic_get_irq_val(unsigned int irq)
{
	/* Register Select - select which IRQ line we are configuring
	 * Bits 0 and 4 are reserved
	 * So, for IRQ 15 ( 0x01111 ) write 0x101110
	 */
	QM_IOAPIC->ioregsel.reg = ((irq & 0x7) << 1) | ((irq & 0x18) << 2);

	return QM_IOAPIC->iowin.reg;
}
/**
 * Write the redirection table entry for an IRQ line.
 *
 * @param irq IRQ line to configure (0 .. NUM_IRQ_LINES - 1).
 * @param value New value for the low 32 bits of the redirection entry.
 */
static __inline__ void _mvic_set_irq_val(unsigned int irq, uint32_t value)
{
	/* Register Select - select which IRQ line we are configuring
	 * Bits 0 and 4 are reserved
	 * So, for IRQ 15 ( 0x01111 ) write 0x101110
	 */
	QM_IOAPIC->ioregsel.reg = ((irq & 0x7) << 1) | ((irq & 0x18) << 2);
	QM_IOAPIC->iowin.reg = value;
}
/**
 * Initialise MVIC.
 *
 * For every IRQ line: issue an EOI to the local APIC (each write to the
 * EOI register clears only one pending bit, hence one write per loop
 * iteration) and mask the line by setting bit 16 of its redirection
 * entry. After this call all lines are masked until registered.
 */
static __inline__ void mvic_init(void)
{
	uint32_t i;

	for (i = 0; i < NUM_IRQ_LINES; i++) {
		/* Clear up any spurious LAPIC interrupts, each call only
		 * clears one bit.
		 */
		QM_MVIC->eoi.reg = 0;

		/* Mask interrupt */
		_mvic_set_irq_val(i, BIT(16));
	}
}
/**
 * Register IRQ with MVIC.
 *
 * Sets the triggering scheme for the line and leaves it unmasked
 * (mask bit 16 is written as 0 in both branches).
 *
 * @param irq IRQ to register.
 */
static __inline__ void mvic_register_irq(uint32_t irq)
{
	uint32_t entry;

	/* Timer-style sources (RTC, always-on periodic timer, PIC timer,
	 * watchdog) are positive-edge triggered; all other lines are
	 * configured as high-level triggered (bit 15 set).
	 */
	if ((irq == QM_IRQ_RTC_0) || (irq == QM_IRQ_AONPT_0) ||
	    (irq == QM_IRQ_PIC_TIMER) || (irq == QM_IRQ_WDT_0)) {
		entry = 0;
	} else {
		entry = BIT(15);
	}

	_mvic_set_irq_val(irq, entry);
}
/**
 * Unmask IRQ with MVIC.
 *
 * Clears the mask bit (bit 16) of the line's redirection entry,
 * preserving all other fields.
 *
 * @param irq IRQ to unmask.
 */
static __inline__ void mvic_unmask_irq(uint32_t irq)
{
	_mvic_set_irq_val(irq, _mvic_get_irq_val(irq) & ~BIT(16));
}
/**
 * Mask IRQ with MVIC.
 *
 * Sets the mask bit (bit 16) of the line's redirection entry,
 * preserving all other fields.
 *
 * @param irq IRQ to mask.
 */
static __inline__ void mvic_mask_irq(uint32_t irq)
{
	_mvic_set_irq_val(irq, _mvic_get_irq_val(irq) | BIT(16));
}
#endif /* __MVIC_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -28,30 +28,43 @@
*/
#include "qm_adc.h"
#include "qm_scss.h"
#include "clk.h"
#include <string.h>
#if (QUARK_D2000)
#define SAMPLE_BLOCK_LEN (16)
/* FIFO_INTERRUPT_THRESHOLD is used by qm_adc_irq_convert to set the threshold
* at which the FIFO will trigger an interrupt. */
#define FIFO_INTERRUPT_THRESHOLD (16)
#define QM_ADC_CHAN_SEQ_MAX (32)
/* ADC commands */
/* ADC commands. */
#define QM_ADC_CMD_START_SINGLE (0)
#define QM_ADC_CMD_START_CONT (1)
#define QM_ADC_CMD_RESET_CAL (2)
#define QM_ADC_CMD_START_CAL (3)
#define QM_ADC_CMD_LOAD_CAL (4)
#define QM_ADC_CMD_STOP_CONT (5)
/* It is necessary to store some of the fields within the ADC_CMD as writing
* them will immediately kick off a conversion or calibration
*/
static uint32_t adc_cmd[QM_ADC_NUM];
static uint8_t sample_window[QM_ADC_NUM];
static qm_adc_resolution_t resolution[QM_ADC_NUM];
static qm_adc_xfer_t irq_xfer[QM_ADC_NUM];
static uint32_t count[QM_ADC_NUM];
static bool dummy_conversion = false;
/* ISR handler */
/* Callbacks for mode change and calibration. */
static void (*mode_callback[QM_ADC_NUM])(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source);
static void (*cal_callback[QM_ADC_NUM])(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source);
static void *mode_callback_data[QM_ADC_NUM];
static void *cal_callback_data[QM_ADC_NUM];
/* ISR handler for command/calibration complete. */
static void qm_adc_isr_handler(const qm_adc_t adc)
{
uint32_t int_status = 0;
@ -59,221 +72,383 @@ static void qm_adc_isr_handler(const qm_adc_t adc)
int_status = QM_ADC[adc].adc_intr_status;
/* FIFO overrun interrupt */
/* FIFO overrun interrupt. */
if (int_status & QM_ADC_INTR_STATUS_FO) {
/* Stop the transfer */
/* Stop the transfer. */
QM_ADC[adc].adc_cmd = QM_ADC_CMD_STOP_CONT;
/* Disable all interrupts */
/* Disable all interrupts. */
QM_ADC[adc].adc_intr_enable = 0;
/* Call the user callback */
irq_xfer[adc].error_callback();
/* Call the user callback. */
if (irq_xfer[adc].callback) {
irq_xfer[adc].callback(irq_xfer[adc].callback_data,
-EIO, QM_ADC_OVERFLOW,
QM_ADC_TRANSFER);
}
}
/* Continuous mode command complete interrupt */
/* Continuous mode command complete interrupt. */
if (int_status & QM_ADC_INTR_STATUS_CONT_CC) {
/* Clear the interrupt */
/* Clear the interrupt. */
QM_ADC[adc].adc_intr_status &= QM_ADC_INTR_STATUS_CONT_CC;
/* Figure out how many samples to read */
if ((count[adc] + SAMPLE_BLOCK_LEN) <= irq_xfer->samples_len) {
samples_to_read = SAMPLE_BLOCK_LEN;
} else {
samples_to_read = irq_xfer->samples_len - count[adc];
/* Calculate the number of samples to read. */
samples_to_read = QM_ADC[adc].adc_fifo_count;
if (samples_to_read >
(irq_xfer[adc].samples_len - count[adc])) {
samples_to_read =
irq_xfer[adc].samples_len - count[adc];
}
/* Copy data out of FIFO */
/* Copy data out of FIFO. The sample must be shifted right by
* 2, 4 or 6 bits for 10, 8 and 6 bit resolution respectively
* to get the correct value. */
for (i = 0; i < samples_to_read; i++) {
irq_xfer->samples[count[adc]] = QM_ADC[adc].adc_sample;
irq_xfer[adc].samples[count[adc]] =
(QM_ADC[adc].adc_sample >>
(2 * (3 - resolution[adc])));
count[adc]++;
}
/* Check if we have the requested number of samples, stop the
* conversion and call the user callback function. */
if (count[adc] == irq_xfer[adc].samples_len) {
/* Stop the transfer. */
QM_ADC[adc].adc_cmd = QM_ADC_CMD_STOP_CONT;
/* Disable all interrupts. */
QM_ADC[adc].adc_intr_enable = 0;
/* Call the user callback. */
if (irq_xfer[adc].callback) {
irq_xfer[adc].callback(
irq_xfer[adc].callback_data, 0,
QM_ADC_COMPLETE, QM_ADC_TRANSFER);
}
}
}
/* Check if we have the requested number of samples, stop the conversion
* and call the user callback function */
if (count[adc] == irq_xfer->samples_len) {
/* Stop the transfer */
QM_ADC[adc].adc_cmd = QM_ADC_CMD_STOP_CONT;
/* Disable all interrupts */
QM_ADC[adc].adc_intr_enable = 0;
/* Call the user callback */
irq_xfer[adc].complete_callback();
/* The Command Complete interrupt is currently used to notify of the
* completion of a calibration command or a dummy conversion. */
if ((int_status & QM_ADC_INTR_STATUS_CC) && (!dummy_conversion)) {
/* Disable and clear the Command Complete interrupt */
QM_ADC[adc].adc_intr_enable &= ~QM_ADC_INTR_ENABLE_CC;
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
/* Call the user callback if it is set. */
if (cal_callback[adc]) {
cal_callback[adc](irq_xfer[adc].callback_data, 0,
QM_ADC_IDLE, QM_ADC_CAL_COMPLETE);
}
}
/* We do not currently handle the Command Complete interrupt as it is
* not being used anywhere */
/* This dummy conversion is needed when switching to normal mode or
* normal mode with calibration. */
if ((int_status & QM_ADC_INTR_STATUS_CC) && (dummy_conversion)) {
/* Flush the FIFO to get rid of the dummy values. */
QM_ADC[adc].adc_sample = QM_ADC_FIFO_CLEAR;
/* Disable and clear the Command Complete interrupt. */
QM_ADC[adc].adc_intr_enable &= ~QM_ADC_INTR_ENABLE_CC;
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
dummy_conversion = false;
/* Call the user callback if it is set. */
if (mode_callback[adc]) {
mode_callback[adc](irq_xfer[adc].callback_data, 0,
QM_ADC_IDLE, QM_ADC_MODE_CHANGED);
}
}
}
/* ISR for ADC 0 */
void qm_adc_0_isr(void)
/* ISR handler for mode change. */
static void qm_adc_pwr_0_isr_handler(const qm_adc_t adc)
{
/* Clear the interrupt. Note that this operates differently to the
* QM_ADC_INTR_STATUS register because you have to write to the
* QM_ADC_OP_MODE register, Interrupt Enable bit to clear. */
QM_ADC[adc].adc_op_mode &= ~QM_ADC_OP_MODE_IE;
/* Perform a dummy conversion if we are transitioning to Normal Mode or
* Normal Mode With Calibration */
if ((QM_ADC[adc].adc_op_mode & QM_ADC_OP_MODE_OM_MASK) >=
QM_ADC_MODE_NORM_CAL) {
/* Set the first sequence register back to its default (ch 0) */
QM_ADC[adc].adc_seq0 = QM_ADC_CAL_SEQ_TABLE_DEFAULT;
/* Clear the command complete interrupt status field */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
dummy_conversion = true;
/* Run a dummy conversion */
QM_ADC[adc].adc_cmd = (QM_ADC_CMD_IE | QM_ADC_CMD_START_SINGLE);
} else {
/* Call the user callback function */
if (mode_callback[adc]) {
mode_callback[adc](irq_xfer[adc].callback_data, 0,
QM_ADC_IDLE, QM_ADC_MODE_CHANGED);
}
}
}
/* ISR for ADC 0 Command/Calibration Complete. */
QM_ISR_DECLARE(qm_adc_0_isr)
{
qm_adc_isr_handler(QM_ADC_0);
QM_ISR_EOI(QM_IRQ_ADC_0_VECTOR);
}
/* ISR for ADC 0 Mode Change. */
QM_ISR_DECLARE(qm_adc_pwr_0_isr)
{
qm_adc_pwr_0_isr_handler(QM_ADC_0);
QM_ISR_EOI(QM_IRQ_ADC_PWR_0_VECTOR);
}
static void setup_seq_table(const qm_adc_t adc, qm_adc_xfer_t *xfer)
{
uint32_t i, offset = 0;
volatile uint32_t *reg_pointer = NULL;
/* Loop over all of the channels to be added */
/* Loop over all of the channels to be added. */
for (i = 0; i < xfer->ch_len; i++) {
/* Get a pointer to the correct address */
/* Get a pointer to the correct address. */
reg_pointer = &QM_ADC[adc].adc_seq0 + (i / 4);
/* Get the offset within the register */
offset = ((i % 4) * 8);
/* Clear the Last bit from all entries we will use */
/* Clear the Last bit from all entries we will use. */
*reg_pointer &= ~(1 << (offset + 7));
/* Place the channel number into the sequence table */
/* Place the channel number into the sequence table. */
*reg_pointer |= (xfer->ch[i] << offset);
}
if (reg_pointer) {
/* Set the correct Last bit */
/* Set the correct Last bit. */
*reg_pointer |= (1 << (offset + 7));
}
}
qm_rc_t qm_adc_calibrate(const qm_adc_t adc)
int qm_adc_calibrate(const qm_adc_t adc)
{
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
/* Clear the command complete interrupt status field */
/* Clear the command complete interrupt status field. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
/* Start the calibration and wait for it to complete */
/* Start the calibration and wait for it to complete. */
QM_ADC[adc].adc_cmd = (QM_ADC_CMD_IE | QM_ADC_CMD_START_CAL);
while (!(QM_ADC[adc].adc_intr_status & QM_ADC_INTR_STATUS_CC))
;
/* Clear the command complete interrupt status field again */
/* Clear the command complete interrupt status field again. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_adc_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode)
int qm_adc_irq_calibrate(const qm_adc_t adc,
void (*callback)(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source),
void *callback_data)
{
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(mode <= QM_ADC_MODE_NORM_NO_CAL, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
/* Issue mode change command and wait for it to complete */
/* Set the callback. */
cal_callback[adc] = callback;
cal_callback_data[adc] = callback_data;
/* Clear and enable the command complete interrupt. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
QM_ADC[adc].adc_intr_enable |= QM_ADC_INTR_ENABLE_CC;
/* Start the calibration */
QM_ADC[adc].adc_cmd = (QM_ADC_CMD_IE | QM_ADC_CMD_START_CAL);
return 0;
}
int qm_adc_set_calibration(const qm_adc_t adc, const qm_adc_calibration_t cal)
{
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(cal < 0x3F, -EINVAL);
/* Clear the command complete interrupt status field. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
/* Set the calibration data and wait for it to complete. */
QM_ADC[adc].adc_cmd = ((cal << QM_ADC_CMD_CAL_DATA_OFFSET) |
QM_ADC_CMD_IE | QM_ADC_CMD_LOAD_CAL);
while (!(QM_ADC[adc].adc_intr_status & QM_ADC_INTR_STATUS_CC))
;
/* Clear the command complete interrupt status field again. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
return 0;
}
int qm_adc_get_calibration(const qm_adc_t adc, qm_adc_calibration_t *const cal)
{
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(NULL != cal, -EINVAL);
*cal = QM_ADC[adc].adc_calibration;
return 0;
}
int qm_adc_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode)
{
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(mode <= QM_ADC_MODE_NORM_NO_CAL, -EINVAL);
/* Issue mode change command and wait for it to complete. */
QM_ADC[adc].adc_op_mode = mode;
while ((QM_ADC[adc].adc_op_mode & QM_ADC_OP_MODE_OM_MASK) != mode)
;
/* Perform a dummy conversion if we are transitioning to Normal Mode */
/* Perform a dummy conversion if we are transitioning to Normal Mode. */
if ((mode >= QM_ADC_MODE_NORM_CAL)) {
/* Set the first sequence register back to its default (ch 0) */
/* Set the first sequence register back to its default. */
QM_ADC[adc].adc_seq0 = QM_ADC_CAL_SEQ_TABLE_DEFAULT;
/* Clear the command complete interrupt status field */
/* Clear the command complete interrupt status field. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
/* Run a dummy convert and wait for it to complete */
/* Run a dummy convert and wait for it to complete. */
QM_ADC[adc].adc_cmd = (QM_ADC_CMD_IE | QM_ADC_CMD_START_SINGLE);
while (!(QM_ADC[adc].adc_intr_status & QM_ADC_INTR_STATUS_CC))
;
/* Flush the FIFO to get rid of the dummy values */
/* Flush the FIFO to get rid of the dummy values. */
QM_ADC[adc].adc_sample = QM_ADC_FIFO_CLEAR;
/* Clear the command complete interrupt status field */
/* Clear the command complete interrupt status field. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC;
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_adc_set_config(const qm_adc_t adc, const qm_adc_config_t *const cfg)
int qm_adc_irq_set_mode(const qm_adc_t adc, const qm_adc_mode_t mode,
void (*callback)(void *data, int error,
qm_adc_status_t status,
qm_adc_cb_source_t source),
void *callback_data)
{
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(NULL != cfg, QM_RC_EINVAL);
/* QM_CHECK(cfg->window > 255, QM_RC_EINVAL); unnecessary - uint8_t */
QM_CHECK(cfg->resolution <= QM_ADC_RES_12_BITS, QM_RC_EINVAL);
QM_CHECK(cfg->window >= cfg->resolution + 2, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(mode <= QM_ADC_MODE_NORM_NO_CAL, -EINVAL);
/* Set the sample window and resolution */
adc_cmd[adc] = ((cfg->window << QM_ADC_CMD_SW_OFFSET) |
(cfg->resolution << QM_ADC_CMD_RESOLUTION_OFFSET));
/* Set the callback. */
mode_callback[adc] = callback;
mode_callback_data[adc] = callback_data;
return QM_RC_OK;
/* When transitioning to Normal Mode or Normal Mode With Calibration,
* enable command complete interrupt to perform a dummy conversion. */
if ((mode >= QM_ADC_MODE_NORM_CAL)) {
QM_ADC[adc].adc_intr_enable |= QM_ADC_INTR_ENABLE_CC;
}
/* Issue mode change command. Completion if this command is notified via
* the ADC Power interrupt source, which is serviced separately to the
* Command/Calibration Complete interrupt. */
QM_ADC[adc].adc_op_mode = (QM_ADC_OP_MODE_IE | mode);
return 0;
}
qm_rc_t qm_adc_get_config(const qm_adc_t adc, qm_adc_config_t *const cfg)
int qm_adc_set_config(const qm_adc_t adc, const qm_adc_config_t *const cfg)
{
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(NULL != cfg, -EINVAL);
QM_CHECK(cfg->resolution <= QM_ADC_RES_12_BITS, -EINVAL);
/* Convert cfg->resolution to actual resolution (2x+6) and add 2 to get
* minimum value for window size. */
QM_CHECK(cfg->window >= ((cfg->resolution * 2) + 8), -EINVAL);
/* Get the sample window and resolution */
cfg->window =
((adc_cmd[adc] & QM_ADC_CMD_SW_MASK) >> QM_ADC_CMD_SW_OFFSET);
cfg->resolution = ((adc_cmd[adc] & QM_ADC_CMD_RESOLUTION_MASK) >>
QM_ADC_CMD_RESOLUTION_OFFSET);
/* Set the sample window and resolution. */
sample_window[adc] = cfg->window;
resolution[adc] = cfg->resolution;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_adc_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer)
int qm_adc_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer)
{
uint32_t i;
/* Check the stuff that was passed in */
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(NULL != xfer, QM_RC_EINVAL);
QM_CHECK(xfer->ch_len < QM_ADC_CHAN_SEQ_MAX, QM_RC_EINVAL);
QM_CHECK(xfer->samples_len > 0, QM_RC_EINVAL);
QM_CHECK(xfer->samples_len < QM_ADC_FIFO_LEN, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(NULL != xfer, -EINVAL);
QM_CHECK(NULL != xfer->ch, -EINVAL);
QM_CHECK(NULL != xfer->samples, -EINVAL);
QM_CHECK(xfer->ch_len > 0, -EINVAL);
QM_CHECK(xfer->ch_len <= QM_ADC_CHAN_SEQ_MAX, -EINVAL);
QM_CHECK(xfer->samples_len > 0, -EINVAL);
QM_CHECK(xfer->samples_len <= QM_ADC_FIFO_LEN, -EINVAL);
/* Flush the FIFO */
/* Flush the FIFO. */
QM_ADC[adc].adc_sample = QM_ADC_FIFO_CLEAR;
/* Populate the sample sequence table */
/* Populate the sample sequence table. */
setup_seq_table(adc, xfer);
/* Issue cmd: window & resolution, number of samples, command */
/* Issue cmd: window & resolution, number of samples, command. */
QM_ADC[adc].adc_cmd =
(adc_cmd[adc] | ((xfer->samples_len - 1) << QM_ADC_CMD_NS_OFFSET) |
(sample_window[adc] << QM_ADC_CMD_SW_OFFSET |
resolution[adc] << QM_ADC_CMD_RESOLUTION_OFFSET |
((xfer->samples_len - 1) << QM_ADC_CMD_NS_OFFSET) |
QM_ADC_CMD_START_SINGLE);
/* Wait for fifo count to reach number of samples */
/* Wait for fifo count to reach number of samples. */
while (QM_ADC[adc].adc_fifo_count != xfer->samples_len)
;
/* Read the value into the data structure */
/* Read the value into the data structure. The sample must be shifted
* right by 2, 4 or 6 bits for 10, 8 and 6 bit resolution respectively
* to get the correct value. */
for (i = 0; i < xfer->samples_len; i++) {
xfer->samples[i] = QM_ADC[adc].adc_sample;
xfer->samples[i] =
(QM_ADC[adc].adc_sample >> (2 * (3 - resolution[adc])));
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_adc_irq_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer)
int qm_adc_irq_convert(const qm_adc_t adc, qm_adc_xfer_t *xfer)
{
QM_CHECK(adc < QM_ADC_NUM, QM_RC_EINVAL);
QM_CHECK(NULL != xfer, QM_RC_EINVAL);
QM_CHECK(xfer->ch_len < QM_ADC_CHAN_SEQ_MAX, QM_RC_EINVAL);
QM_CHECK(xfer->samples_len > 0, QM_RC_EINVAL);
QM_CHECK(adc < QM_ADC_NUM, -EINVAL);
QM_CHECK(NULL != xfer, -EINVAL);
QM_CHECK(NULL != xfer->ch, -EINVAL);
QM_CHECK(NULL != xfer->samples, -EINVAL);
QM_CHECK(xfer->ch_len > 0, -EINVAL);
QM_CHECK(xfer->ch_len <= QM_ADC_CHAN_SEQ_MAX, -EINVAL);
QM_CHECK(xfer->samples_len > 0, -EINVAL);
/* Reset the count and flush the FIFO */
/* Reset the count and flush the FIFO. */
count[adc] = 0;
QM_ADC[adc].adc_sample = QM_ADC_FIFO_CLEAR;
/* Populate the sample sequence table */
/* Populate the sample sequence table. */
setup_seq_table(adc, xfer);
/* Copy the xfer struct so we can get access from the ISR */
/* Copy the xfer struct so we can get access from the ISR. */
memcpy(&irq_xfer[adc], xfer, sizeof(qm_adc_xfer_t));
/* Clear and enable continuous command and fifo overrun interupts */
QM_ADC[adc].adc_intr_status =
QM_ADC_INTR_STATUS_FO | QM_ADC_INTR_STATUS_CONT_CC;
/* Clear all pending interrupts. */
QM_ADC[adc].adc_intr_status = QM_ADC_INTR_STATUS_CC |
QM_ADC_INTR_STATUS_FO |
QM_ADC_INTR_STATUS_CONT_CC;
/* Enable the continuous command and fifo overrun interupts. */
QM_ADC[adc].adc_intr_enable =
QM_ADC_INTR_ENABLE_FO | QM_ADC_INTR_ENABLE_CONT_CC;
/* Issue cmd: window & resolution, number of samples, interrupt enable
* and start continuous coversion command. If xfer->samples_len is less
* than SAMPLE_BLOCK_LEN extra samples will be discarded in the ISR */
* than FIFO_INTERRUPT_THRESHOLD extra samples will be discarded in the
* ISR. */
QM_ADC[adc].adc_cmd =
(adc_cmd[adc] | ((SAMPLE_BLOCK_LEN - 1) << QM_ADC_CMD_NS_OFFSET) |
(sample_window[adc] << QM_ADC_CMD_SW_OFFSET |
resolution[adc] << QM_ADC_CMD_RESOLUTION_OFFSET |
((FIFO_INTERRUPT_THRESHOLD - 1) << QM_ADC_CMD_NS_OFFSET) |
QM_ADC_CMD_IE | QM_ADC_CMD_START_CONT);
return QM_RC_OK;
return 0;
}
#endif /* QUARK_D2000 */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,7 +29,8 @@
#include "qm_aon_counters.h"
static void (*callback)() = NULL;
static void (*callback)(void *) = NULL;
static void *callback_data;
static void pt_reset(const qm_scss_aon_t aonc)
{
@ -56,98 +57,94 @@ static void pt_reset(const qm_scss_aon_t aonc)
QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(1);
}
void qm_aonpt_isr_0(void)
QM_ISR_DECLARE(qm_aonpt_isr_0)
{
if (callback) {
(*callback)();
(*callback)(callback_data);
}
QM_SCSS_AON[0].aonpt_ctrl |= BIT(0); /* Clear pending interrupts */
QM_ISR_EOI(QM_IRQ_AONPT_0_VECTOR);
}
qm_rc_t qm_aonc_enable(const qm_scss_aon_t aonc)
int qm_aonc_enable(const qm_scss_aon_t aonc)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_SCSS_AON[aonc].aonc_cfg = 0x1;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_aonc_disable(const qm_scss_aon_t aonc)
int qm_aonc_disable(const qm_scss_aon_t aonc)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_SCSS_AON[aonc].aonc_cfg = 0x0;
return QM_RC_OK;
return 0;
}
uint32_t qm_aonc_get_value(const qm_scss_aon_t aonc)
int qm_aonc_get_value(const qm_scss_aon_t aonc, uint32_t * const val)
{
return QM_SCSS_AON[aonc].aonc_cnt;
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_CHECK(val != NULL, -EINVAL);
*val = QM_SCSS_AON[aonc].aonc_cnt;
return 0;
}
qm_rc_t qm_aonpt_set_config(const qm_scss_aon_t aonc,
const qm_aonpt_config_t *const cfg)
int qm_aonpt_set_config(const qm_scss_aon_t aonc,
const qm_aonpt_config_t *const cfg)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(0); /* Clear pending interrupts */
QM_SCSS_AON[aonc].aonpt_cfg = cfg->count;
if (cfg->int_en) {
callback = cfg->callback;
callback_data = cfg->callback_data;
} else {
callback = NULL;
}
pt_reset(aonc);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_aonpt_get_config(const qm_scss_aon_t aonc,
qm_aonpt_config_t *const cfg)
int qm_aonpt_get_value(const qm_scss_aon_t aonc, uint32_t *const val)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_CHECK(val != NULL, -EINVAL);
cfg->count = QM_SCSS_AON[aonc].aonpt_cfg;
if (callback == NULL) {
cfg->int_en = false;
} else {
cfg->int_en = true;
}
cfg->callback = callback;
return QM_RC_OK;
*val = QM_SCSS_AON[aonc].aonpt_cnt;
return 0;
}
uint32_t qm_aonpt_get_value(const qm_scss_aon_t aonc)
int qm_aonpt_get_status(const qm_scss_aon_t aonc, bool *const status)
{
return QM_SCSS_AON[aonc].aonpt_cnt;
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_CHECK(status != NULL, -EINVAL);
*status = QM_SCSS_AON[aonc].aonpt_stat & BIT(0);
return 0;
}
bool qm_aonpt_get_status(const qm_scss_aon_t aonc)
int qm_aonpt_clear(const qm_scss_aon_t aonc)
{
return QM_SCSS_AON[aonc].aonpt_stat & BIT(0);
}
qm_rc_t qm_aonpt_clear(const qm_scss_aon_t aonc)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
QM_SCSS_AON[aonc].aonpt_ctrl |= BIT(0);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_aonpt_reset(const qm_scss_aon_t aonc)
int qm_aonpt_reset(const qm_scss_aon_t aonc)
{
QM_CHECK(aonc < QM_SCSS_AON_NUM, QM_RC_EINVAL);
QM_CHECK(aonc < QM_SCSS_AON_NUM, -EINVAL);
pt_reset(aonc);
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,9 +29,10 @@
#include "qm_comparator.h"
static void (*callback)(uint32_t) = NULL;
static void (*callback)(void *, uint32_t) = NULL;
static void *callback_data;
void qm_ac_isr(void)
QM_ISR_DECLARE(qm_ac_isr)
{
uint32_t int_status = QM_SCSS_CMP->cmp_stat_clr;
@ -54,7 +55,7 @@ void qm_ac_isr(void)
}
#endif
if (callback) {
(*callback)(int_status);
(*callback)(callback_data, int_status);
}
/* Clear all pending interrupts */
@ -63,24 +64,12 @@ void qm_ac_isr(void)
QM_ISR_EOI(QM_IRQ_AC_VECTOR);
}
qm_rc_t qm_ac_get_config(qm_ac_config_t *const config)
int qm_ac_set_config(const qm_ac_config_t *const config)
{
QM_CHECK(config != NULL, QM_RC_EINVAL);
config->callback = callback;
config->reference = QM_SCSS_CMP->cmp_ref_sel;
config->polarity = QM_SCSS_CMP->cmp_ref_pol;
config->power = QM_SCSS_CMP->cmp_pwr;
config->int_en = QM_SCSS_CMP->cmp_en;
return QM_RC_OK;
}
qm_rc_t qm_ac_set_config(const qm_ac_config_t *const config)
{
QM_CHECK(config != NULL, QM_RC_EINVAL);
QM_CHECK(config != NULL, -EINVAL);
callback = config->callback;
callback_data = config->callback_data;
QM_SCSS_CMP->cmp_ref_sel = config->reference;
QM_SCSS_CMP->cmp_ref_pol = config->polarity;
QM_SCSS_CMP->cmp_pwr = config->power;
@ -89,5 +78,5 @@ qm_rc_t qm_ac_set_config(const qm_ac_config_t *const config)
QM_SCSS_CMP->cmp_stat_clr = 0x7FFFF;
QM_SCSS_CMP->cmp_en = config->int_en;
return QM_RC_OK;
return 0;
}

View file

@ -0,0 +1,393 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "dma.h"

#ifndef UNIT_TEST
/* Base pointer(s) to the memory-mapped DMA controller register block. */
qm_dma_reg_t *qm_dma[QM_DMA_NUM] = {(qm_dma_reg_t *)QM_DMA_BASE};
#endif

/* DMA driver private data structures */
/* Per-controller, per-channel client callback bookkeeping, zero-initialized.
 * Written by qm_dma_channel_set_config(), read from the ISR paths. */
dma_cfg_prv_t dma_channel_config[QM_DMA_NUM][QM_DMA_CHANNEL_NUM] = {{{0}}};
/*
 * Transfer interrupt handler.
 *
 * Common service routine for a transfer-complete interrupt on a single
 * channel: clears and masks the channel's interrupt sources, then invokes
 * the client callback (if one was registered via
 * qm_dma_channel_set_config()) with the number of data items transferred.
 */
static void qm_dma_isr_handler(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id)
{
	uint32_t transfer_length;
	dma_cfg_prv_t *chan_cfg;

	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_TFR);
	QM_ASSERT(int_reg->status_tfr_low & BIT(channel_id));

	/* Clear interrupt */
	int_reg->clear_tfr_low = BIT(channel_id);

	/* Mask interrupts for this channel.
	 * NOTE(review): the << 8 offset looks like the mask register's
	 * write-enable byte — confirm against the DMA controller databook. */
	int_reg->mask_tfr_low = BIT(channel_id) << 8;
	int_reg->mask_err_low = BIT(channel_id) << 8;

	/* Call the callback if registered and pass the
	 * transfer length (error code 0 = success). */
	chan_cfg = &dma_channel_config[dma][channel_id];
	if (chan_cfg->client_callback) {
		transfer_length = get_transfer_length(dma, channel_id);
		chan_cfg->client_callback(chan_cfg->callback_context,
					  transfer_length, 0);
	}
}
/*
 * Error interrupt handler.
 *
 * The error interrupt line is shared by all channels, so this walks the
 * per-channel error status bits, and for each flagged channel clears and
 * masks its interrupts, then invokes the client callback with length 0
 * and error code -EIO.
 */
static void qm_dma_isr_err_handler(const qm_dma_t dma)
{
	uint32_t interrupt_channel_mask;
	dma_cfg_prv_t *chan_cfg;
	qm_dma_channel_id_t channel_id = 0;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	QM_ASSERT(int_reg->status_int_low & QM_DMA_INT_STATUS_ERR);
	QM_ASSERT(int_reg->status_err_low);
	interrupt_channel_mask = int_reg->status_err_low;

	/* Shift the status bitmap right one bit per iteration; channel_id
	 * tracks which channel the current LSB corresponds to. */
	while (interrupt_channel_mask) {

		/* Find the channel that the interrupt is for */
		if (!(interrupt_channel_mask & 0x1)) {
			interrupt_channel_mask >>= 1;
			channel_id++;
			continue;
		}

		/* Clear the error interrupt for this channel */
		int_reg->clear_err_low = BIT(channel_id);

		/* Mask interrupts for this channel */
		int_reg->mask_tfr_low = BIT(channel_id) << 8;
		int_reg->mask_err_low = BIT(channel_id) << 8;

		/* Call the callback if registered and pass the
		 * transfer error code */
		chan_cfg = &dma_channel_config[dma][channel_id];
		if (chan_cfg->client_callback) {
			chan_cfg->client_callback(chan_cfg->callback_context, 0,
						  -EIO);
		}

		interrupt_channel_mask >>= 1;
		channel_id++;
	}
}
/* ISR for the shared DMA error interrupt line: delegates to the common
 * error handler, then signals end-of-interrupt. */
QM_ISR_DECLARE(qm_dma_0_isr_err)
{
	qm_dma_isr_err_handler(QM_DMA_0);
	QM_ISR_EOI(QM_IRQ_DMA_ERR_VECTOR);
}

/* Per-channel transfer-complete ISRs: each delegates to the common
 * transfer handler with its hard-wired channel ID, then signals EOI. */
QM_ISR_DECLARE(qm_dma_0_isr_0)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_0);
	QM_ISR_EOI(QM_IRQ_DMA_0_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_1)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_1);
	QM_ISR_EOI(QM_IRQ_DMA_1_VECTOR);
}

/* Quark SE exposes eight DMA channels; D2000 only the first two. */
#if (QUARK_SE)
QM_ISR_DECLARE(qm_dma_0_isr_2)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_2);
	QM_ISR_EOI(QM_IRQ_DMA_2_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_3)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_3);
	QM_ISR_EOI(QM_IRQ_DMA_3_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_4)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_4);
	QM_ISR_EOI(QM_IRQ_DMA_4_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_5)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_5);
	QM_ISR_EOI(QM_IRQ_DMA_5_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_6)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_6);
	QM_ISR_EOI(QM_IRQ_DMA_6_VECTOR);
}

QM_ISR_DECLARE(qm_dma_0_isr_7)
{
	qm_dma_isr_handler(QM_DMA_0, QM_DMA_CHANNEL_7);
	QM_ISR_EOI(QM_IRQ_DMA_7_VECTOR);
}
#endif /* QUARK_SE */
/*
 * Initialize the DMA controller: enable its clock, disable the controller
 * and every channel, mask and clear all interrupt sources, then re-enable
 * the controller in a clean state.
 *
 * Returns 0 on success, -EINVAL on a bad controller ID, or the error code
 * propagated from a failed controller/channel disable.
 */
int qm_dma_init(const qm_dma_t dma)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);

	qm_dma_channel_id_t channel_id;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;
	int return_code;

	/* Enable the DMA Clock */
	QM_SCSS_CCU->ccu_mlayer_ahb_ctl |= QM_CCU_DMA_CLK_EN;

	/* Disable the controller */
	return_code = dma_controller_disable(dma);
	if (return_code) {
		return return_code;
	}

	/* Disable the channels and interrupts */
	for (channel_id = 0; channel_id < QM_DMA_CHANNEL_NUM; channel_id++) {
		return_code = dma_channel_disable(dma, channel_id);
		if (return_code) {
			return return_code;
		}
		dma_interrupt_disable(dma, channel_id);
	}

	/* Mask all interrupts (the << 8 selects the mask registers'
	 * write-enable byte). */
	int_reg->mask_tfr_low = CHANNEL_MASK_ALL << 8;
	int_reg->mask_block_low = CHANNEL_MASK_ALL << 8;
	int_reg->mask_src_trans_low = CHANNEL_MASK_ALL << 8;
	int_reg->mask_dst_trans_low = CHANNEL_MASK_ALL << 8;
	int_reg->mask_err_low = CHANNEL_MASK_ALL << 8;

	/* Clear all interrupts */
	int_reg->clear_tfr_low = CHANNEL_MASK_ALL;
	int_reg->clear_block_low = CHANNEL_MASK_ALL;
	int_reg->clear_src_trans_low = CHANNEL_MASK_ALL;
	int_reg->clear_dst_trans_low = CHANNEL_MASK_ALL;
	int_reg->clear_err_low = CHANNEL_MASK_ALL;

	/* Enable the controller */
	dma_controller_enable(dma);

	return 0;
}
/*
 * Configure a DMA channel: transfer type, source/destination widths and
 * burst lengths, transfer direction, address-increment mode, handshake
 * parameters (peripheral transfers only), and the client callback that the
 * ISRs will invoke on completion or error.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the error code
 * propagated from dma_set_transfer_type().
 */
int qm_dma_channel_set_config(const qm_dma_t dma,
			      const qm_dma_channel_id_t channel_id,
			      qm_dma_channel_config_t *const channel_config)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
	QM_CHECK(channel_config != NULL, -EINVAL);

	dma_cfg_prv_t *chan_cfg = &dma_channel_config[dma][channel_id];
	int return_code;

	/* Set the transfer type. Only one currently supported */
	return_code =
	    dma_set_transfer_type(dma, channel_id, QM_DMA_TYPE_SINGLE);
	if (return_code) {
		return return_code;
	}

	/* Set the source and destination transfer width. */
	dma_set_source_transfer_width(dma, channel_id,
				      channel_config->source_transfer_width);
	dma_set_destination_transfer_width(
	    dma, channel_id, channel_config->destination_transfer_width);

	/* Set the source and destination burst transfer length. */
	dma_set_source_burst_length(dma, channel_id,
				    channel_config->source_burst_length);
	dma_set_destination_burst_length(
	    dma, channel_id, channel_config->destination_burst_length);

	/* Set channel direction */
	dma_set_transfer_direction(dma, channel_id,
				   channel_config->channel_direction);

	/* Set the increment type depending on direction: the peripheral
	 * side keeps a fixed address, the memory side increments.
	 * NOTE(review): no default case — a channel_direction outside these
	 * three enum values silently skips increment setup; consider
	 * rejecting it with -EINVAL (confirm the enum's full range). */
	switch (channel_config->channel_direction) {
	case QM_DMA_PERIPHERAL_TO_MEMORY:
		dma_set_source_increment(dma, channel_id,
					 QM_DMA_ADDRESS_NO_CHANGE);
		dma_set_destination_increment(dma, channel_id,
					      QM_DMA_ADDRESS_INCREMENT);
		break;
	case QM_DMA_MEMORY_TO_PERIPHERAL:
		dma_set_source_increment(dma, channel_id,
					 QM_DMA_ADDRESS_INCREMENT);
		dma_set_destination_increment(dma, channel_id,
					      QM_DMA_ADDRESS_NO_CHANGE);
		break;
	case QM_DMA_MEMORY_TO_MEMORY:
		dma_set_source_increment(dma, channel_id,
					 QM_DMA_ADDRESS_INCREMENT);
		dma_set_destination_increment(dma, channel_id,
					      QM_DMA_ADDRESS_INCREMENT);
		break;
	}

	if (channel_config->channel_direction != QM_DMA_MEMORY_TO_MEMORY) {
		/* Set the handshake interface. */
		dma_set_handshake_interface(
		    dma, channel_id, channel_config->handshake_interface);

		/* Set the handshake type. This is hardcoded to hardware */
		dma_set_handshake_type(dma, channel_id, 0);

		/* Set the handshake polarity. */
		dma_set_handshake_polarity(dma, channel_id,
					   channel_config->handshake_polarity);
	}

	/* Save the client ID */
	chan_cfg->callback_context = channel_config->callback_context;

	/* Save the callback provided by DMA client */
	chan_cfg->client_callback = channel_config->client_callback;

	return 0;
}
/*
 * Program the per-transfer parameters for a channel: source address,
 * destination address and block length.
 *
 * Returns 0 on success or -EINVAL on bad arguments.
 */
int qm_dma_transfer_set_config(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id,
			       qm_dma_transfer_t *const transfer_config)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
	QM_CHECK(transfer_config != NULL, -EINVAL);
	QM_CHECK(transfer_config->source_address != NULL, -EINVAL);
	QM_CHECK(transfer_config->destination_address != NULL, -EINVAL);
	QM_CHECK(transfer_config->block_size >= QM_DMA_CTL_H_BLOCK_TS_MIN,
		 -EINVAL);
	QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
		 -EINVAL);

	const uint32_t src = (uint32_t)transfer_config->source_address;
	const uint32_t dst = (uint32_t)transfer_config->destination_address;

	/* Program source, destination and block length for this channel. */
	dma_set_source_address(dma, channel_id, src);
	dma_set_destination_address(dma, channel_id, dst);
	dma_set_block_size(dma, channel_id, transfer_config->block_size);

	return 0;
}
/*
 * Start a previously configured transfer on a channel: unmask the
 * channel's transfer-complete and error interrupts, then enable the
 * channel's interrupts and finally the channel itself.
 *
 * Returns 0 on success or -EINVAL on bad arguments.
 */
int qm_dma_transfer_start(const qm_dma_t dma,
			  const qm_dma_channel_id_t channel_id)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);

	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	/* Unmask Interrupts (upper byte = write enable, lower byte = mask
	 * bit value for this channel). */
	int_reg->mask_tfr_low = ((BIT(channel_id) << 8) | BIT(channel_id));
	int_reg->mask_err_low = ((BIT(channel_id) << 8) | BIT(channel_id));

	/* Enable interrupts and the channel */
	dma_interrupt_enable(dma, channel_id);
	dma_channel_enable(dma, channel_id);

	return 0;
}
/*
 * Stop an in-flight transfer on a channel: disable and mask the channel's
 * interrupts, disable the channel, and — if the disable succeeded — invoke
 * the client callback with the length transferred so far (error code 0).
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the error code
 * propagated from dma_channel_disable().
 */
int qm_dma_transfer_terminate(const qm_dma_t dma,
			      const qm_dma_channel_id_t channel_id)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);

	dma_cfg_prv_t *chan_cfg;
	int return_code;
	uint32_t transfer_length;
	volatile qm_dma_int_reg_t *int_reg = &QM_DMA[dma]->int_reg;

	/* Disable interrupts for the channel */
	dma_interrupt_disable(dma, channel_id);

	/* Mask Interrupts */
	int_reg->mask_tfr_low = (BIT(channel_id) << 8);
	int_reg->mask_err_low = (BIT(channel_id) << 8);

	/* The channel is disabled and the transfer complete callback is
	 * triggered. This callback provides the client with the data length
	 * transferred before the transfer was stopped. */
	return_code = dma_channel_disable(dma, channel_id);
	if (!return_code) {
		chan_cfg = &dma_channel_config[dma][channel_id];
		if (chan_cfg->client_callback) {
			transfer_length = get_transfer_length(dma, channel_id);
			chan_cfg->client_callback(chan_cfg->callback_context,
						  transfer_length, 0);
		}
	}

	return return_code;
}
/*
 * Convenience wrapper for a memory-to-memory transfer: program the
 * per-transfer registers, then start the transfer immediately.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the error code
 * propagated from config/start.
 */
int qm_dma_transfer_mem_to_mem(const qm_dma_t dma,
			       const qm_dma_channel_id_t channel_id,
			       qm_dma_transfer_t *const transfer_config)
{
	QM_CHECK(dma < QM_DMA_NUM, -EINVAL);
	QM_CHECK(channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);
	QM_CHECK(transfer_config != NULL, -EINVAL);
	QM_CHECK(transfer_config->source_address != NULL, -EINVAL);
	QM_CHECK(transfer_config->destination_address != NULL, -EINVAL);
	QM_CHECK(transfer_config->block_size <= QM_DMA_CTL_H_BLOCK_TS_MAX,
		 -EINVAL);

	/* Program the channel for this transfer, then kick it off. */
	int rc = qm_dma_transfer_set_config(dma, channel_id, transfer_config);
	if (rc) {
		return rc;
	}

	return qm_dma_transfer_start(dma, channel_id);
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,61 +29,57 @@
#include "qm_flash.h"
qm_rc_t qm_flash_set_config(const qm_flash_t flash, qm_flash_config_t *cfg)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(cfg->wait_states <= QM_FLASH_MAX_WAIT_STATES, QM_RC_EINVAL);
QM_CHECK(cfg->us_count <= QM_FLASH_MAX_US_COUNT, QM_RC_EINVAL);
QM_CHECK(cfg->write_disable <= QM_FLASH_WRITE_DISABLE, QM_RC_EINVAL);
#ifndef UNIT_TEST
#if (QUARK_SE)
qm_flash_reg_t *qm_flash[QM_FLASH_NUM] = {(qm_flash_reg_t *)QM_FLASH_BASE_0,
(qm_flash_reg_t *)QM_FLASH_BASE_1};
#elif(QUARK_D2000)
qm_flash_reg_t *qm_flash[QM_FLASH_NUM] = {(qm_flash_reg_t *)QM_FLASH_BASE_0};
#endif
#endif
QM_FLASH[flash].tmg_ctrl =
(QM_FLASH[flash].tmg_ctrl & QM_FLASH_TMG_DEF_MASK) |
int qm_flash_set_config(const qm_flash_t flash, const qm_flash_config_t *cfg)
{
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(cfg->wait_states <= QM_FLASH_MAX_WAIT_STATES, -EINVAL);
QM_CHECK(cfg->us_count <= QM_FLASH_MAX_US_COUNT, -EINVAL);
QM_CHECK(cfg->write_disable <= QM_FLASH_WRITE_DISABLE, -EINVAL);
qm_flash_reg_t *const controller = QM_FLASH[flash];
controller->tmg_ctrl =
(controller->tmg_ctrl & QM_FLASH_TMG_DEF_MASK) |
(cfg->us_count | (cfg->wait_states << QM_FLASH_WAIT_STATE_OFFSET));
if (QM_FLASH_WRITE_DISABLE == cfg->write_disable) {
QM_FLASH[flash].ctrl |= QM_FLASH_WRITE_DISABLE_VAL;
controller->ctrl |= QM_FLASH_WRITE_DISABLE_VAL;
} else {
QM_FLASH[flash].ctrl &= ~QM_FLASH_WRITE_DISABLE_VAL;
controller->ctrl &= ~QM_FLASH_WRITE_DISABLE_VAL;
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_flash_get_config(const qm_flash_t flash, qm_flash_config_t *cfg)
int qm_flash_word_write(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t f_addr, const uint32_t data)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->wait_states =
(QM_FLASH[flash].tmg_ctrl & QM_FLASH_WAIT_STATE_MASK) >>
QM_FLASH_WAIT_STATE_OFFSET;
cfg->us_count =
QM_FLASH[flash].tmg_ctrl & QM_FLASH_MICRO_SEC_COUNT_MASK;
cfg->write_disable =
(QM_FLASH[flash].ctrl & QM_FLASH_WRITE_DISABLE_VAL) >>
QM_FLASH_WRITE_DISABLE_OFFSET;
return QM_RC_OK;
}
qm_rc_t qm_flash_word_write(const qm_flash_t flash, qm_flash_region_t region,
uint32_t f_addr, uint32_t data)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, QM_RC_EINVAL);
QM_CHECK(f_addr < QM_FLASH_MAX_ADDR, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, -EINVAL);
QM_CHECK(f_addr < QM_FLASH_MAX_ADDR, -EINVAL);
volatile uint32_t *p_wr_data, *p_wr_ctrl;
qm_flash_reg_t *const controller = QM_FLASH[flash];
/* Rom and flash write registers are laid out the same, but different */
/* locations in memory, so point to those to have the same function to*/
/* update page section based on main or rom. */
switch (region) {
case QM_FLASH_REGION_SYS:
p_wr_data = &QM_FLASH[flash].flash_wr_data;
p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
p_wr_data = &controller->flash_wr_data;
p_wr_ctrl = &controller->flash_wr_ctrl;
#if (QUARK_D2000)
/* Main flash memory starts after flash data section. */
f_addr += QM_FLASH_REGION_DATA_0_SIZE;
@ -92,18 +88,18 @@ qm_rc_t qm_flash_word_write(const qm_flash_t flash, qm_flash_region_t region,
#if (QUARK_D2000)
case QM_FLASH_REGION_DATA:
p_wr_data = &QM_FLASH[flash].flash_wr_data;
p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
p_wr_data = &controller->flash_wr_data;
p_wr_ctrl = &controller->flash_wr_ctrl;
break;
#endif
case QM_FLASH_REGION_OTP:
p_wr_data = &QM_FLASH[flash].rom_wr_data;
p_wr_ctrl = &QM_FLASH[flash].rom_wr_ctrl;
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
break;
default:
return QM_RC_ERROR;
return -EINVAL;
break;
}
/* Update address to include the write_address offset. */
@ -112,23 +108,26 @@ qm_rc_t qm_flash_word_write(const qm_flash_t flash, qm_flash_region_t region,
*p_wr_data = data;
*p_wr_ctrl = f_addr |= WR_REQ;
/* Wait for write to finish. */
while (!(QM_FLASH[flash].flash_stts & WR_DONE))
while (!(controller->flash_stts & WR_DONE))
;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
uint32_t page_num, uint32_t *data, uint32_t len)
int qm_flash_page_write(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t page_num, const uint32_t *const data,
uint32_t len)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, QM_RC_EINVAL);
QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, QM_RC_EINVAL);
QM_CHECK(data != NULL, QM_RC_EINVAL);
QM_CHECK(len <= QM_FLASH_PAGE_SIZE, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, -EINVAL);
QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
QM_CHECK(len <= QM_FLASH_PAGE_SIZE_DWORDS, -EINVAL);
uint32_t i;
volatile uint32_t *p_wr_data, *p_wr_ctrl;
qm_flash_reg_t *const controller = QM_FLASH[flash];
/* Rom and flash write registers are laid out the same, but different */
/* locations in memory, so point to those to have the same function to*/
/* update page section based on main or rom. */
@ -140,17 +139,17 @@ qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
case QM_FLASH_REGION_DATA:
#endif
p_wr_data = &QM_FLASH[flash].flash_wr_data;
p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
p_wr_data = &controller->flash_wr_data;
p_wr_ctrl = &controller->flash_wr_ctrl;
break;
case QM_FLASH_REGION_OTP:
p_wr_data = &QM_FLASH[flash].rom_wr_data;
p_wr_ctrl = &QM_FLASH[flash].rom_wr_ctrl;
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
break;
default:
return QM_RC_ERROR;
return -EINVAL;
break;
}
/* Update address to include the write_address offset. */
@ -160,7 +159,7 @@ qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
*p_wr_ctrl = page_num | ER_REQ;
/* Wait for the erase to complete. */
while (!(QM_FLASH[flash].flash_stts & ER_DONE))
while (!(controller->flash_stts & ER_DONE))
;
/* Write bytes into Flash. */
@ -170,34 +169,36 @@ qm_rc_t qm_flash_page_write(const qm_flash_t flash, qm_flash_region_t region,
*p_wr_ctrl |= WR_REQ;
page_num += QM_FLASH_ADDR_INC;
/* Wait for write to finish. */
while (!(QM_FLASH[flash].flash_stts & WR_DONE))
while (!(controller->flash_stts & WR_DONE))
;
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_flash_page_update(const qm_flash_t flash, qm_flash_region_t region,
uint32_t f_addr, uint32_t *page_buffer,
uint32_t *data_buffer, uint32_t len)
int qm_flash_page_update(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t f_addr, uint32_t *const page_buffer,
const uint32_t *const data_buffer, uint32_t len)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, QM_RC_EINVAL);
QM_CHECK(f_addr < QM_FLASH_MAX_ADDR, QM_RC_EINVAL);
QM_CHECK(page_buffer != NULL, QM_RC_EINVAL);
QM_CHECK(data_buffer != NULL, QM_RC_EINVAL);
QM_CHECK(len <= QM_FLASH_PAGE_SIZE, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, -EINVAL);
QM_CHECK(f_addr < QM_FLASH_MAX_ADDR, -EINVAL);
QM_CHECK(page_buffer != NULL, -EINVAL);
QM_CHECK(data_buffer != NULL, -EINVAL);
QM_CHECK(len <= QM_FLASH_PAGE_SIZE_DWORDS, -EINVAL);
uint32_t i, j;
volatile uint32_t *p_flash = NULL, *p_wr_data, *p_wr_ctrl;
qm_flash_reg_t *const controller = QM_FLASH[flash];
/* Rom and flash write registers are laid out the same, but different */
/* locations in memory, so point to those to have the same function to*/
/* update page section based on main or rom. */
switch (region) {
case QM_FLASH_REGION_SYS:
p_wr_data = &QM_FLASH[flash].flash_wr_data;
p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
p_wr_data = &controller->flash_wr_data;
p_wr_ctrl = &controller->flash_wr_ctrl;
#if (QUARK_D2000)
p_flash = (uint32_t *)(QM_FLASH_REGION_SYS_0_BASE +
(f_addr & QM_FLASH_PAGE_MASK));
@ -218,27 +219,27 @@ qm_rc_t qm_flash_page_update(const qm_flash_t flash, qm_flash_region_t region,
#if (QUARK_D2000)
case QM_FLASH_REGION_DATA:
p_wr_data = &QM_FLASH[flash].flash_wr_data;
p_wr_ctrl = &QM_FLASH[flash].flash_wr_ctrl;
p_wr_data = &controller->flash_wr_data;
p_wr_ctrl = &controller->flash_wr_ctrl;
p_flash = (uint32_t *)(QM_FLASH_REGION_DATA_0_BASE +
(f_addr & QM_FLASH_PAGE_MASK));
break;
#endif
case QM_FLASH_REGION_OTP:
p_wr_data = &QM_FLASH[flash].rom_wr_data;
p_wr_ctrl = &QM_FLASH[flash].rom_wr_ctrl;
p_wr_data = &controller->rom_wr_data;
p_wr_ctrl = &controller->rom_wr_ctrl;
p_flash = (uint32_t *)(QM_FLASH_REGION_OTP_0_BASE +
(f_addr & QM_FLASH_PAGE_MASK));
break;
default:
return QM_RC_ERROR;
return -EINVAL;
break;
}
/* Copy Flash Page, with location to be modified, to SRAM */
for (i = 0; i < QM_FLASH_PAGE_SIZE; i++) {
for (i = 0; i < QM_FLASH_PAGE_SIZE_DWORDS; i++) {
page_buffer[i] = *p_flash;
p_flash++;
}
@ -253,30 +254,32 @@ qm_rc_t qm_flash_page_update(const qm_flash_t flash, qm_flash_region_t region,
}
/* Wait for the erase to complete */
while (!(QM_FLASH[flash].flash_stts & ER_DONE))
while (!(controller->flash_stts & ER_DONE))
;
/* Update address to include the write_address offset. */
f_addr &= QM_FLASH_PAGE_MASK;
f_addr <<= WR_ADDR_OFFSET;
/* Copy the modified page in SRAM into Flash. */
for (i = 0; i < QM_FLASH_PAGE_SIZE; i++) {
for (i = 0; i < QM_FLASH_PAGE_SIZE_DWORDS; i++) {
*p_wr_data = page_buffer[i];
*p_wr_ctrl = f_addr |= WR_REQ;
f_addr += QM_FLASH_ADDR_INC;
/* Wait for write to finish. */
while (!(QM_FLASH[flash].flash_stts & WR_DONE))
while (!(controller->flash_stts & WR_DONE))
;
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_flash_page_erase(const qm_flash_t flash, qm_flash_region_t region,
uint32_t page_num)
int qm_flash_page_erase(const qm_flash_t flash, const qm_flash_region_t region,
uint32_t page_num)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, QM_RC_EINVAL);
QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(region <= QM_FLASH_REGION_NUM, -EINVAL);
QM_CHECK(page_num <= QM_FLASH_MAX_PAGE_NUM, -EINVAL);
qm_flash_reg_t *const controller = QM_FLASH[flash];
switch (region) {
@ -286,36 +289,38 @@ qm_rc_t qm_flash_page_erase(const qm_flash_t flash, qm_flash_region_t region,
case QM_FLASH_REGION_DATA:
#endif
QM_FLASH[flash].flash_wr_ctrl =
controller->flash_wr_ctrl =
(page_num << (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET)) |
ER_REQ;
break;
case QM_FLASH_REGION_OTP:
QM_FLASH[flash].rom_wr_ctrl =
controller->rom_wr_ctrl =
(page_num << (QM_FLASH_PAGE_SIZE_BITS + WR_ADDR_OFFSET)) |
ER_REQ;
break;
default:
return QM_RC_EINVAL;
return -EINVAL;
}
while (!(QM_FLASH[flash].flash_stts & ER_DONE))
while (!(controller->flash_stts & ER_DONE))
;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_flash_mass_erase(const qm_flash_t flash, uint8_t include_rom)
int qm_flash_mass_erase(const qm_flash_t flash, const uint8_t include_rom)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
qm_flash_reg_t *const controller = QM_FLASH[flash];
/* Erase all the Flash pages */
if (include_rom) {
QM_FLASH[flash].ctrl |= MASS_ERASE_INFO;
controller->ctrl |= MASS_ERASE_INFO;
}
QM_FLASH[flash].ctrl |= MASS_ERASE;
while (!(QM_FLASH[flash].flash_stts & ER_DONE))
controller->ctrl |= MASS_ERASE;
while (!(controller->flash_stts & ER_DONE))
;
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -28,128 +28,115 @@
*/
#include "qm_fpr.h"
#include "qm_interrupt.h"
static void (*callback[QM_FLASH_NUM])(void);
static void (*callback[QM_FLASH_NUM])(void *);
static void *callback_data[QM_FLASH_NUM];
void qm_fpr_isr_0(void)
QM_ISR_DECLARE(qm_fpr_isr_0)
{
(*callback[QM_FLASH_0])();
QM_FLASH[QM_FLASH_0].mpr_vsts = QM_FPR_MPR_VSTS_VALID;
(*callback[QM_FLASH_0])(callback_data[QM_FLASH_0]);
QM_FLASH[QM_FLASH_0]->mpr_vsts = QM_FPR_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_FLASH_0_VECTOR);
}
#if (QUARK_SE)
void qm_fpr_isr_1(void)
QM_ISR_DECLARE(qm_fpr_isr_1)
{
(*callback[QM_FLASH_1])();
QM_FLASH[QM_FLASH_1].mpr_vsts = QM_FPR_MPR_VSTS_VALID;
(*callback[QM_FLASH_1])(callback_data[QM_FLASH_1]);
QM_FLASH[QM_FLASH_1]->mpr_vsts = QM_FPR_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_FLASH_1_VECTOR);
}
#endif
qm_rc_t qm_fpr_set_config(const qm_flash_t flash, const qm_fpr_id_t id,
const qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region)
int qm_fpr_set_config(const qm_flash_t flash, const qm_fpr_id_t id,
const qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_FPR_NUM, QM_RC_EINVAL);
QM_CHECK(region < QM_MAIN_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(cfg->low_bound <= cfg->up_bound, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
QM_CHECK(id < QM_FPR_NUM, -EINVAL);
QM_CHECK(region < QM_MAIN_FLASH_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(cfg->low_bound <= cfg->up_bound, -EINVAL);
QM_FLASH[flash].fpr_rd_cfg[id] &= ~QM_FPR_LOCK;
qm_flash_reg_t *const controller = QM_FLASH[flash];
controller->fpr_rd_cfg[id] &= ~QM_FPR_LOCK;
if (region == QM_MAIN_FLASH_SYSTEM) {
QM_FLASH[flash].fpr_rd_cfg[id] =
controller->fpr_rd_cfg[id] =
(cfg->allow_agents << QM_FPR_RD_ALLOW_OFFSET) |
((cfg->up_bound + QM_FLASH_REGION_DATA_BASE_OFFSET)
<< QM_FPR_UPPER_BOUND_OFFSET) |
(cfg->low_bound + QM_FLASH_REGION_DATA_BASE_OFFSET);
}
#if (QUARK_D2000)
else if (region == QM_MAIN_FLASH_OTP) {
QM_FLASH[flash].fpr_rd_cfg[id] =
else if (region == QM_MAIN_FLASH_DATA) {
controller->fpr_rd_cfg[id] =
(cfg->allow_agents << QM_FPR_RD_ALLOW_OFFSET) |
(cfg->up_bound << QM_FPR_UPPER_BOUND_OFFSET) |
cfg->low_bound;
}
#endif
/* qm_fpr_id_t enable/lock */
QM_FLASH[flash].fpr_rd_cfg[id] |=
(cfg->en_mask << QM_FPR_ENABLE_OFFSET);
controller->fpr_rd_cfg[id] |= (cfg->en_mask << QM_FPR_ENABLE_OFFSET);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_fpr_get_config(const qm_flash_t flash, const qm_fpr_id_t id,
qm_fpr_config_t *const cfg,
const qm_flash_region_type_t region)
int qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash,
qm_fpr_callback_t callback_fn, void *data)
{
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_FPR_NUM, QM_RC_EINVAL);
QM_CHECK(region < QM_MAIN_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->en_mask = (QM_FLASH[flash].fpr_rd_cfg[id] & QM_FPR_ENABLE_MASK) >>
QM_FPR_ENABLE_OFFSET;
cfg->allow_agents =
(QM_FLASH[flash].fpr_rd_cfg[id] & QM_FPR_RD_ALLOW_MASK) >>
QM_FPR_RD_ALLOW_OFFSET;
if (region == QM_MAIN_FLASH_SYSTEM) {
cfg->up_bound = ((QM_FLASH[flash].fpr_rd_cfg[id] &
QM_FPR_UPPER_BOUND_MASK) >>
QM_FPR_UPPER_BOUND_OFFSET) -
QM_FLASH_REGION_DATA_BASE_OFFSET;
cfg->low_bound =
((QM_FLASH[flash].fpr_rd_cfg[id] & QM_FPR_LOW_BOUND_MASK) -
QM_FLASH_REGION_DATA_BASE_OFFSET);
}
#if (QUARK_D2000)
else if (region == QM_MAIN_FLASH_OTP) {
cfg->up_bound = (QM_FLASH[flash].fpr_rd_cfg[id] &
QM_FPR_UPPER_BOUND_MASK) >>
QM_FPR_UPPER_BOUND_OFFSET;
cfg->low_bound =
QM_FLASH[flash].fpr_rd_cfg[id] & QM_FPR_LOW_BOUND_MASK;
}
#endif
return QM_RC_OK;
}
qm_rc_t qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
const qm_flash_t flash,
qm_fpr_callback_t callback_fn)
{
QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, QM_RC_EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, QM_RC_EINVAL);
QM_CHECK(mode <= FPR_VIOL_MODE_PROBE, -EINVAL);
QM_CHECK(flash < QM_FLASH_NUM, -EINVAL);
volatile uint32_t *int_flash_controller_mask =
&QM_SCSS_INT->int_flash_controller_0_mask;
/* interrupt mode */
if (FPR_VIOL_MODE_INTERRUPT == mode) {
QM_CHECK(callback_fn != NULL, QM_RC_EINVAL);
callback[flash] = callback_fn;
callback_data[flash] = data;
/* host interrupt to Lakemont core */
/* unmask interrupt */
if (flash == QM_FLASH_0) {
qm_irq_unmask(QM_IRQ_FLASH_0);
#if (QUARK_SE)
} else {
qm_irq_unmask(QM_IRQ_FLASH_1);
#endif
}
#if defined(QM_SENSOR)
int_flash_controller_mask[flash] |=
QM_INT_FLASH_CONTROLLER_SS_HALT_MASK;
#else /* QM_SENSOR */
int_flash_controller_mask[flash] |=
QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK;
int_flash_controller_mask[flash] &=
~QM_INT_FLASH_CONTROLLER_HOST_MASK;
#endif /* QM_SENSOR */
QM_SCSS_PMU->p_sts &= ~QM_P_STS_HALT_INTERRUPT_REDIRECTION;
}
/* probe or reset mode */
else {
int_flash_controller_mask[flash] |=
QM_INT_FLASH_CONTROLLER_HOST_MASK;
/* mask interrupt */
if (flash == QM_FLASH_0) {
qm_irq_mask(QM_IRQ_FLASH_0);
#if (QUARK_SE)
} else {
qm_irq_mask(QM_IRQ_FLASH_1);
#endif
}
#if defined(QM_SENSOR)
int_flash_controller_mask[flash] &=
~QM_INT_FLASH_CONTROLLER_SS_HALT_MASK;
#else /* QM_SENSOR */
int_flash_controller_mask[flash] &=
~QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK;
#endif /* QM_SENSOR */
if (FPR_VIOL_MODE_PROBE == mode) {
@ -167,5 +154,5 @@ qm_rc_t qm_fpr_set_violation_policy(const qm_fpr_viol_mode_t mode,
~QM_P_STS_HALT_INTERRUPT_REDIRECTION;
}
}
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -38,14 +38,15 @@ qm_gpio_reg_t *qm_gpio[QM_GPIO_NUM] = {(qm_gpio_reg_t *)QM_GPIO_BASE};
#endif
#endif
static void (*callback[QM_GPIO_NUM])(uint32_t);
static void (*callback[QM_GPIO_NUM])(void *, uint32_t);
static void *callback_data[QM_GPIO_NUM];
static void gpio_isr(const qm_gpio_t gpio)
{
uint32_t int_status = QM_GPIO[gpio]->gpio_intstatus;
const uint32_t int_status = QM_GPIO[gpio]->gpio_intstatus;
if (callback[gpio]) {
(*callback[gpio])(int_status);
(*callback[gpio])(callback_data[gpio], int_status);
}
/* This will clear all pending interrupts flags in status */
@ -55,25 +56,25 @@ static void gpio_isr(const qm_gpio_t gpio)
QM_GPIO[gpio]->gpio_porta_eoi;
}
void qm_gpio_isr_0(void)
QM_ISR_DECLARE(qm_gpio_isr_0)
{
gpio_isr(QM_GPIO_0);
QM_ISR_EOI(QM_IRQ_GPIO_0_VECTOR);
}
#if (HAS_AON_GPIO)
void qm_aon_gpio_isr_0(void)
QM_ISR_DECLARE(qm_aon_gpio_isr_0)
{
gpio_isr(QM_AON_GPIO_0);
QM_ISR_EOI(QM_IRQ_AONGPIO_0_VECTOR);
}
#endif
qm_rc_t qm_gpio_set_config(const qm_gpio_t gpio,
const qm_gpio_port_config_t *const cfg)
int qm_gpio_set_config(const qm_gpio_t gpio,
const qm_gpio_port_config_t *const cfg)
{
QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
qm_gpio_reg_t *const controller = QM_GPIO[gpio];
@ -81,72 +82,80 @@ qm_rc_t qm_gpio_set_config(const qm_gpio_t gpio,
controller->gpio_intmask = 0xffffffff;
controller->gpio_swporta_ddr = cfg->direction;
controller->gpio_inten = cfg->int_en;
controller->gpio_inttype_level = cfg->int_type;
controller->gpio_int_polarity = cfg->int_polarity;
controller->gpio_debounce = cfg->int_debounce;
controller->gpio_int_bothedge = cfg->int_bothedge;
callback[gpio] = cfg->callback;
controller->gpio_inten = cfg->int_en;
callback_data[gpio] = cfg->callback_data;
controller->gpio_intmask = mask;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_gpio_get_config(const qm_gpio_t gpio,
qm_gpio_port_config_t *const cfg)
int qm_gpio_read_pin(const qm_gpio_t gpio, const uint8_t pin,
qm_gpio_state_t *const state)
{
QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, -EINVAL);
QM_CHECK(state != NULL, -EINVAL);
qm_gpio_reg_t *const controller = QM_GPIO[gpio];
*state = ((QM_GPIO[gpio]->gpio_ext_porta) >> pin) & 1;
cfg->direction = controller->gpio_swporta_ddr;
cfg->int_en = controller->gpio_inten;
cfg->int_type = controller->gpio_inttype_level;
cfg->int_polarity = controller->gpio_int_polarity;
cfg->int_debounce = controller->gpio_debounce;
cfg->int_bothedge = controller->gpio_int_bothedge;
cfg->callback = callback[gpio];
return QM_RC_OK;
return 0;
}
bool qm_gpio_read_pin(const qm_gpio_t gpio, const uint8_t pin)
int qm_gpio_set_pin(const qm_gpio_t gpio, const uint8_t pin)
{
return (((QM_GPIO[gpio]->gpio_ext_porta) >> pin) & 1);
}
qm_rc_t qm_gpio_set_pin(const qm_gpio_t gpio, const uint8_t pin)
{
QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, QM_RC_EINVAL);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, -EINVAL);
QM_GPIO[gpio]->gpio_swporta_dr |= (1 << pin);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_gpio_clear_pin(const qm_gpio_t gpio, const uint8_t pin)
int qm_gpio_clear_pin(const qm_gpio_t gpio, const uint8_t pin)
{
QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, QM_RC_EINVAL);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, -EINVAL);
QM_GPIO[gpio]->gpio_swporta_dr &= ~(1 << pin);
return QM_RC_OK;
return 0;
}
uint32_t qm_gpio_read_port(const qm_gpio_t gpio)
int qm_gpio_set_pin_state(const qm_gpio_t gpio, const uint8_t pin,
const qm_gpio_state_t state)
{
return (QM_GPIO[gpio]->gpio_ext_porta);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(pin <= QM_NUM_GPIO_PINS, -EINVAL);
QM_CHECK(state < QM_GPIO_STATE_NUM, -EINVAL);
uint32_t reg = QM_GPIO[gpio]->gpio_swporta_dr;
reg ^= (-state ^ reg) & (1 << pin);
QM_GPIO[gpio]->gpio_swporta_dr = reg;
return 0;
}
qm_rc_t qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val)
int qm_gpio_read_port(const qm_gpio_t gpio, uint32_t *const port)
{
QM_CHECK(gpio < QM_GPIO_NUM, QM_RC_EINVAL);
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_CHECK(port != NULL, -EINVAL);
*port = QM_GPIO[gpio]->gpio_ext_porta;
return 0;
}
int qm_gpio_write_port(const qm_gpio_t gpio, const uint32_t val)
{
QM_CHECK(gpio < QM_GPIO_NUM, -EINVAL);
QM_GPIO[gpio]->gpio_swporta_dr = val;
return QM_RC_OK;
return 0;
}

File diff suppressed because it is too large Load diff

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -49,7 +49,5 @@ uint32_t qm_soc_version(void)
return (QUARK_D2000_SOC_ID << 8) | QM_SCSS_INFO->rev;
#elif(QUARK_SE)
return (QM_SCSS_GP->id << 8) | QM_SCSS_GP->rev;
#else
#error "Unsupported / unspecified processor detected."
#endif
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -37,6 +37,11 @@
#elif(HAS_MVIC)
#include "mvic.h"
#elif(QM_SENSOR)
#include "qm_ss_interrupt.h"
#include "qm_sensor_regs.h"
extern qm_ss_isr_t __ivt_vect_table[];
#else
#error "Unsupported / unspecified processor detected."
#endif
@ -44,17 +49,29 @@
/* SCSS base addr for LMT interrupt routing, for linear IRQ mapping */
#define SCSS_LMT_INT_MASK_BASE (&QM_SCSS_INT->int_i2c_mst_0_mask)
/* SCSS interrupt router: Lakemont delivery masking */
#define SCSS_LMT_INT_MASK BIT(0)
#if (QM_SENSOR)
#define SCSS_INT_MASK BIT(8) /* Sensor Subsystem interrupt masking */
static void ss_register_irq(unsigned int vector);
#else
#define SCSS_INT_MASK BIT(0) /* Lakemont interrupt masking */
#endif
void qm_irq_disable(void)
{
#if (QM_SENSOR)
qm_ss_irq_disable();
#else
__asm__ __volatile__("cli");
#endif
}
void qm_irq_enable(void)
{
#if (QM_SENSOR)
qm_ss_irq_enable();
#else
__asm__ __volatile__("sti");
#endif
}
void qm_irq_mask(uint32_t irq)
@ -65,8 +82,9 @@ void qm_irq_mask(uint32_t irq)
#elif(HAS_MVIC)
mvic_mask_irq(irq);
#else
#error "Unsupported / unspecified processor detected."
#elif(QM_SENSOR)
qm_ss_irq_mask(irq);
#endif
}
@ -78,8 +96,9 @@ void qm_irq_unmask(uint32_t irq)
#elif(HAS_MVIC)
mvic_unmask_irq(irq);
#else
#error "Unsupported / unspecified processor detected."
#elif(QM_SENSOR)
qm_ss_irq_unmask(irq);
#endif
}
@ -95,37 +114,73 @@ void _qm_irq_setup(uint32_t irq, uint16_t register_offset)
ioapic_register_irq(irq, QM_IRQ_TO_VECTOR(irq));
#elif(HAS_MVIC)
mvic_register_irq(irq);
#else
#error "Unsupported / unspecified interrupt controller detected."
#elif(QM_SENSOR)
ss_register_irq(QM_IRQ_TO_VECTOR(irq));
#endif
/* Route peripheral interrupt to Lakemont */
/* Route peripheral interrupt to Lakemont/Sensor Subsystem */
scss_intmask = (uint32_t *)SCSS_LMT_INT_MASK_BASE + register_offset;
#if (QUARK_SE || QUARK_D2000)
/* On Quark D2000 and SE the register for the analog comparator host
* mask has a different bit field than the other host mask registers.
*/
#if (QUARK_SE || QUARK_D2000 || QM_SENSOR)
/* On Quark D2000 and Quark SE the register for the analog comparator
* host mask has a different bit field than the other host mask
* registers. */
if (QM_IRQ_AC_MASK_OFFSET == register_offset) {
*scss_intmask &= ~0x0007ffff;
#if !defined(QUARK_D2000)
} else if (QM_IRQ_MBOX_MASK_OFFSET == register_offset) {
/* Masking MAILBOX irq id done inside mbox driver */
#endif
} else {
*scss_intmask &= ~SCSS_LMT_INT_MASK;
*scss_intmask &= ~SCSS_INT_MASK;
}
#else
*scss_intmask &= ~SCSS_LMT_INT_MASK;
*scss_intmask &= ~SCSS_INT_MASK;
#endif
#if (HAS_APIC)
ioapic_unmask_irq(irq);
#elif(HAS_MVIC)
mvic_unmask_irq(irq);
#else
#error "Unsupported / unspecified interrupt controller detected."
#elif(QM_SENSOR)
qm_ss_irq_unmask(QM_IRQ_TO_VECTOR(irq));
#endif
}
/*
* Register an Interrupt Service Routine to a given interrupt vector.
*
* @param[in] vector Interrupt Vector number.
* @param[in] isr ISR to register to given vector. Must be a valid x86 ISR.
* If this can't be provided, qm_irq_request() or
* qm_int_vector_request() should be used instead.
*/
void _qm_register_isr(uint32_t vector, qm_isr_t isr)
{
#if (QM_SENSOR)
__ivt_vect_table[vector] = isr;
#else
idt_set_intr_gate_desc(vector, (uint32_t)isr);
#endif
}
#if (QM_SENSOR)
static void ss_register_irq(unsigned int vector)
{
/*
* By hardware power-on default, SS interrupts are level triggered.
* The following switch statement sets some of the peripherals to edge
* triggered.
*/
switch (vector) {
case QM_SS_IRQ_ADC_PWR_VECTOR:
case QM_IRQ_RTC_0_VECTOR:
case QM_IRQ_AONPT_0_VECTOR:
case QM_IRQ_WDT_0_VECTOR:
/* Edge sensitive. */
__builtin_arc_sr(vector, QM_SS_AUX_IRQ_SELECT);
__builtin_arc_sr(QM_SS_IRQ_EDGE_SENSITIVE,
QM_SS_AUX_IRQ_TRIGER);
}
}
#endif

View file

@ -0,0 +1,190 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_common.h"
#include "qm_mailbox.h"
#include "qm_interrupt.h"
/* Register offsets from a base register for a particular mailbox channel. */
#define QM_MBOX_CTRL_OFFSET (0x00)
#define QM_MBOX_DATA0_OFFSET (0x04)
#define QM_MBOX_DATA1_OFFSET (0x08)
#define QM_MBOX_DATA2_OFFSET (0x0C)
#define QM_MBOX_DATA3_OFFSET (0x10)
#define QM_MBOX_STATUS_OFFSET (0x14)
#define QM_MBOX_SS_MASK_OFFSET (0x8)
/* Private data structure maintained by the driver */
typedef struct qm_mailbox_info_t {
/*!< Callback function registered with the application. */
qm_mbox_callback_t mpr_cb;
/*!< Callback function data return via the callback function. */
void *cb_data;
} qm_mailbox_info_t;
/* Mailbox channels private data structures */
static qm_mailbox_info_t mailbox_devs[QM_MBOX_CH_NUM];
QM_ISR_DECLARE(qm_mbox_isr)
{
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_SCSS_MAILBOX;
uint8_t i = 0;
uint8_t mask;
uint16_t chall_sts = QM_SCSS_MAILBOX->mbox_chall_sts;
/*
* Interrupt masking register is has a bit assigned per MBOX channel.
* QM_SCSS_INT 0-7 bits are MBOX interrupt gates to APIC, 8-15 are
* gating interrupts to Sensors PIC.
*/
#if (QM_SENSOR)
mask = 0xff & (QM_SCSS_INT->int_mailbox_mask >> QM_MBOX_SS_MASK_OFFSET);
#else
mask = 0xff & QM_SCSS_INT->int_mailbox_mask;
#endif
for (i = 0; chall_sts; i++, chall_sts >>= 2) {
if ((chall_sts & QM_MBOX_CH_INT) == 0) {
continue;
}
if (mask & BIT(i)) {
continue;
}
if (mbox_reg[i].ch_sts & QM_MBOX_CH_INT) {
if (NULL != mailbox_devs[i].mpr_cb) {
/* Callback */
mailbox_devs[i].mpr_cb(mailbox_devs[i].cb_data);
}
/* Clear the interrupt */
mbox_reg[i].ch_sts = QM_MBOX_CH_INT;
}
}
QM_ISR_EOI(QM_IRQ_MBOX_VECTOR);
}
int qm_mbox_ch_set_config(const qm_mbox_ch_t mbox_ch, qm_mbox_callback_t mpr_cb,
void *cb_data, const bool irq_en)
{
uint32_t mask;
QM_CHECK(mbox_ch < QM_MBOX_CH_NUM, -EINVAL);
/* Block interrupts while configuring MBOX */
qm_irq_mask(QM_IRQ_MBOX);
#if (QM_SENSOR)
/* MBOX Interrupt Routing gate to SS core. */
mask = BIT((mbox_ch + QM_MBOX_SS_MASK_OFFSET));
#else
/* MBOX Interrupt Routing gate to LMT core. */
mask = BIT(mbox_ch);
#endif
/* Register callback function */
mailbox_devs[mbox_ch].mpr_cb = mpr_cb;
mailbox_devs[mbox_ch].cb_data = cb_data;
if (irq_en == true) {
/* Note: Routing is done now, cannot be done in irq_request! */
QM_SCSS_INT->int_mailbox_mask &= ~mask;
/* Clear the interrupt */
((qm_mailbox_t *)QM_SCSS_MAILBOX + mbox_ch)->ch_sts =
QM_MBOX_CH_INT;
} else {
/* Note: Routing is done now, cannot be done in irq_request! */
QM_SCSS_INT->int_mailbox_mask |= mask;
}
/* UnBlock MBOX interrupts. */
qm_irq_unmask(QM_IRQ_MBOX);
return 0;
}
int qm_mbox_ch_write(const qm_mbox_ch_t mbox_ch,
const qm_mbox_msg_t *const data)
{
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_SCSS_MAILBOX +
mbox_ch;
/* Check if the previous message has been consumed. */
if (!(mbox_reg->ch_ctrl & QM_MBOX_TRIGGER_CH_INT)) {
/* Write the payload data to the mailbox channel. */
mbox_reg->ch_data[0] = data->data[QM_MBOX_PAYLOAD_0];
mbox_reg->ch_data[1] = data->data[QM_MBOX_PAYLOAD_1];
mbox_reg->ch_data[2] = data->data[QM_MBOX_PAYLOAD_2];
mbox_reg->ch_data[3] = data->data[QM_MBOX_PAYLOAD_3];
/* Write the control word and trigger the channel interrupt. */
mbox_reg->ch_ctrl = data->ctrl | QM_MBOX_TRIGGER_CH_INT;
return 0;
}
/* Previous message has not been consumed. */
return -EIO;
}
int qm_mbox_ch_read(const qm_mbox_ch_t mbox_ch, qm_mbox_msg_t *const data)
{
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_SCSS_MAILBOX + mbox_ch;
/* Read data from the mailbox channel and clear bit 31 of the
* control word. */
data->ctrl = mbox_reg->ch_ctrl & ~QM_MBOX_TRIGGER_CH_INT;
data->data[QM_MBOX_PAYLOAD_0] = mbox_reg->ch_data[0];
data->data[QM_MBOX_PAYLOAD_1] = mbox_reg->ch_data[1];
data->data[QM_MBOX_PAYLOAD_2] = mbox_reg->ch_data[2];
data->data[QM_MBOX_PAYLOAD_3] = mbox_reg->ch_data[3];
/* Check if the message has arrived. */
if (mbox_reg->ch_sts & QM_MBOX_CH_DATA) {
/* Clear data status bit */
mbox_reg->ch_sts = QM_MBOX_CH_DATA;
return 0;
}
/* there is no new data in mailbox */
return -EIO;
}
int qm_mbox_ch_get_status(const qm_mbox_ch_t mbox_ch,
qm_mbox_ch_status_t *const status)
{
QM_CHECK(mbox_ch < QM_MBOX_CH_NUM, -EINVAL);
qm_mailbox_t *const mbox_reg = (qm_mailbox_t *)QM_SCSS_MAILBOX +
mbox_ch;
*status = mbox_reg->ch_sts &QM_MBOX_CH_STATUS_MASK;
return 0;
}
int qm_mbox_ch_data_ack(const qm_mbox_ch_t mbox_ch)
{
QM_CHECK(mbox_ch < QM_MBOX_CH_NUM, -EINVAL);
((qm_mailbox_t *)QM_SCSS_MAILBOX + mbox_ch)->ch_sts = QM_MBOX_CH_DATA;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -28,26 +28,27 @@
*/
#include "qm_mpr.h"
#include "qm_interrupt.h"
#define ADDRESS_MASK_7_BIT (0x7F)
#define ADDRESS_MASK_LOW_BOUND (0x7F)
#define ADDRESS_MASK_UP_BOUND (0x1FC00)
static void (*callback)(void);
static void (*callback)(void *data);
static void *callback_data;
void qm_mpr_isr(void)
QM_ISR_DECLARE(qm_mpr_isr)
{
(*callback)();
if (callback) {
(*callback)(callback_data);
}
QM_MPR->mpr_vsts = QM_MPR_VSTS_VALID;
QM_ISR_EOI(QM_IRQ_SRAM_VECTOR);
}
qm_rc_t qm_mpr_set_config(const qm_mpr_id_t id,
const qm_mpr_config_t *const cfg)
int qm_mpr_set_config(const qm_mpr_id_t id, const qm_mpr_config_t *const cfg)
{
QM_CHECK(id < QM_MPR_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(id < QM_MPR_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_MPR->mpr_cfg[id] &= ~QM_MPR_EN_LOCK_MASK;
@ -62,54 +63,41 @@ qm_rc_t qm_mpr_set_config(const qm_mpr_id_t id,
/* enable/lock */
QM_MPR->mpr_cfg[id] |= (cfg->en_lock_mask << QM_MPR_EN_LOCK_OFFSET);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_mpr_get_config(const qm_mpr_id_t id, qm_mpr_config_t *const cfg)
int qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn,
void *callback_data)
{
QM_CHECK(id < QM_MPR_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->low_bound = QM_MPR->mpr_cfg[id] & ADDRESS_MASK_LOW_BOUND;
cfg->up_bound = (QM_MPR->mpr_cfg[id] & ADDRESS_MASK_UP_BOUND) >>
QM_MPR_UP_BOUND_OFFSET;
cfg->agent_read_en_mask =
(QM_MPR->mpr_cfg[id] & QM_MPR_RD_EN_MASK) >> QM_MPR_RD_EN_OFFSET;
cfg->agent_write_en_mask =
(QM_MPR->mpr_cfg[id] & QM_MPR_WR_EN_MASK) >> QM_MPR_WR_EN_OFFSET;
cfg->en_lock_mask = (QM_MPR->mpr_cfg[id] & QM_MPR_EN_LOCK_MASK) >>
QM_MPR_EN_LOCK_OFFSET;
return QM_RC_OK;
}
qm_rc_t qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
qm_mpr_callback_t callback_fn)
{
QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, QM_RC_EINVAL);
QM_CHECK(mode <= MPR_VIOL_MODE_PROBE, -EINVAL);
/* interrupt mode */
if (MPR_VIOL_MODE_INTERRUPT == mode) {
QM_CHECK(callback_fn != NULL, QM_RC_EINVAL);
callback = callback_fn;
callback_data = callback_data;
/* host interrupt to Lakemont core */
/* unmask interrupt */
qm_irq_unmask(QM_IRQ_SRAM);
#if defined(QM_SENSOR)
QM_SCSS_INT->int_sram_controller_mask |=
QM_INT_SRAM_CONTROLLER_SS_HALT_MASK;
#else /* QM_SENSOR */
QM_SCSS_INT->int_sram_controller_mask |=
QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK;
QM_SCSS_INT->int_sram_controller_mask &=
~QM_INT_SRAM_CONTROLLER_HOST_MASK;
#endif /* QM_SENSOR */
}
/* probe or reset mode */
else {
/* host halt interrupt to Lakemont core */
QM_SCSS_INT->int_sram_controller_mask |=
QM_INT_SRAM_CONTROLLER_HOST_MASK;
/* mask interrupt */
qm_irq_mask(QM_IRQ_SRAM);
#if defined(QM_SENSOR)
QM_SCSS_INT->int_sram_controller_mask &=
~QM_INT_SRAM_CONTROLLER_SS_HALT_MASK;
#else /* QM_SENSOR */
QM_SCSS_INT->int_sram_controller_mask &=
~QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK;
#endif /* QM_SENSOR */
if (MPR_VIOL_MODE_PROBE == mode) {
@ -129,5 +117,5 @@ qm_rc_t qm_mpr_set_violation_policy(const qm_mpr_viol_mode_t mode,
~QM_P_STS_HALT_INTERRUPT_REDIRECTION;
}
}
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -28,7 +28,6 @@
*/
#include "qm_pic_timer.h"
#include "qm_interrupt.h"
/*
* PIC timer access layer. Supports both Local APIC timer and MVIC timer.
@ -41,7 +40,8 @@
#define LVTTIMER_MODE_PERIODIC_OFFS (17)
#define LVTTIMER_INT_MASK_OFFS (16)
static void (*callback)(void);
static void (*callback)(void *data);
static void *callback_data;
#if (HAS_APIC)
#define PIC_TIMER (QM_LAPIC)
@ -49,10 +49,10 @@ static void (*callback)(void);
#define PIC_TIMER (QM_PIC_TIMER)
#endif
void qm_pic_timer_isr(void)
QM_ISR_DECLARE(qm_pic_timer_isr)
{
if (callback) {
callback();
callback(callback_data);
}
#if (HAS_APIC)
@ -63,10 +63,10 @@ void qm_pic_timer_isr(void)
#endif
}
qm_rc_t qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg)
int qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg)
{
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(cfg->mode <= QM_PIC_TIMER_MODE_PERIODIC, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(cfg->mode <= QM_PIC_TIMER_MODE_PERIODIC, -EINVAL);
/* Stop timer, mask interrupt and program interrupt vector */
PIC_TIMER->timer_icr.reg = 0;
@ -84,33 +84,25 @@ qm_rc_t qm_pic_timer_set_config(const qm_pic_timer_config_t *const cfg)
PIC_TIMER->lvttimer.reg |= cfg->mode << LVTTIMER_MODE_PERIODIC_OFFS;
callback = cfg->callback;
callback_data = cfg->callback_data;
if (cfg->int_en) {
PIC_TIMER->lvttimer.reg &= ~BIT(LVTTIMER_INT_MASK_OFFS);
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pic_timer_get_config(qm_pic_timer_config_t *const cfg)
{
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->mode =
(PIC_TIMER->lvttimer.reg >> LVTTIMER_MODE_PERIODIC_OFFS) & 1;
cfg->int_en =
(PIC_TIMER->lvttimer.reg & BIT(LVTTIMER_INT_MASK_OFFS)) == 0
? true
: false;
cfg->callback = callback;
return QM_RC_OK;
}
qm_rc_t qm_pic_timer_set(const uint32_t count)
int qm_pic_timer_set(const uint32_t count)
{
PIC_TIMER->timer_icr.reg = count;
return QM_RC_OK;
return 0;
}
uint32_t qm_pic_timer_get()
int qm_pic_timer_get(uint32_t *const count)
{
return PIC_TIMER->timer_ccr.reg;
QM_CHECK(count != NULL, -EINVAL);
*count = PIC_TIMER->timer_ccr.reg;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -33,70 +33,88 @@
#define MASK_1BIT (0x1)
#define MASK_2BIT (0x3)
static __inline__ uint8_t pin2reg(qm_pin_id_t pin, uint8_t bit_width)
/**
* Calculate the register index for a specific pin.
*
* @param[in] pin The pin to be used.
* @param[in] width The width in bits for each pin in the register.
*
* @return The register index of the given pin.
*/
static uint32_t pin_to_register(uint32_t pin, uint32_t width)
{
return (pin / (32 / bit_width));
return (pin / (32 / width));
}
static __inline__ uint8_t pin2reg_offs(qm_pin_id_t pin, uint8_t bit_width)
/**
* Calculate the offset for a pin within a register.
*
* @param[in] pin The pin to be used.
* @param[in] width The width in bits for each pin in the register.
*
* @return The offset for the pin within the register.
*/
static uint32_t pin_to_offset(uint32_t pin, uint32_t width)
{
return ((pin % (32 / bit_width)) * bit_width);
return ((pin % (32 / width)) * width);
}
qm_rc_t qm_pmux_select(qm_pin_id_t pin, qm_pmux_fn_t fn)
int qm_pmux_select(const qm_pin_id_t pin, const qm_pmux_fn_t fn)
{
QM_CHECK(pin < QM_PIN_ID_NUM, QM_RC_EINVAL);
QM_CHECK(fn <= QM_PMUX_FN_3, QM_RC_EINVAL);
QM_CHECK(pin < QM_PIN_ID_NUM, -EINVAL);
QM_CHECK(fn <= QM_PMUX_FN_3, -EINVAL);
uint8_t reg = pin2reg(pin, 2);
uint8_t offs = pin2reg_offs(pin, 2);
uint32_t reg = pin_to_register(pin, 2);
uint32_t offs = pin_to_offset(pin, 2);
QM_SCSS_PMUX->pmux_sel[reg] &= ~(MASK_2BIT << offs);
QM_SCSS_PMUX->pmux_sel[reg] |= (fn << offs);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pmux_set_slew(qm_pin_id_t pin, qm_pmux_slew_t slew)
int qm_pmux_set_slew(const qm_pin_id_t pin, const qm_pmux_slew_t slew)
{
QM_CHECK(pin < QM_PIN_ID_NUM, QM_RC_EINVAL);
QM_CHECK(slew < QM_PMUX_SLEW_NUM, QM_RC_EINVAL);
QM_CHECK(pin < QM_PIN_ID_NUM, -EINVAL);
QM_CHECK(slew < QM_PMUX_SLEW_NUM, -EINVAL);
uint8_t reg = pin2reg(pin, 1);
uint8_t offs = pin2reg_offs(pin, 1);
uint32_t reg = pin_to_register(pin, 1);
uint32_t mask = MASK_1BIT << pin_to_offset(pin, 1);
QM_SCSS_PMUX->pmux_slew[reg] &= ~(MASK_1BIT << offs);
QM_SCSS_PMUX->pmux_slew[reg] |= (slew << offs);
return QM_RC_OK;
if (slew == 0) {
QM_SCSS_PMUX->pmux_slew[reg] &= ~mask;
} else {
QM_SCSS_PMUX->pmux_slew[reg] |= mask;
}
return 0;
}
qm_rc_t qm_pmux_input_en(qm_pin_id_t pin, bool enable)
int qm_pmux_input_en(const qm_pin_id_t pin, const bool enable)
{
QM_CHECK(pin < QM_PIN_ID_NUM, QM_RC_EINVAL);
QM_CHECK(pin < QM_PIN_ID_NUM, -EINVAL);
uint8_t reg = pin2reg(pin, 1);
uint8_t offs = pin2reg_offs(pin, 1);
uint32_t reg = pin_to_register(pin, 1);
uint32_t mask = MASK_1BIT << pin_to_offset(pin, 1);
enable &= MASK_1BIT;
QM_SCSS_PMUX->pmux_in_en[reg] &= ~(MASK_1BIT << offs);
QM_SCSS_PMUX->pmux_in_en[reg] |= (enable << offs);
return QM_RC_OK;
if (enable == false) {
QM_SCSS_PMUX->pmux_in_en[reg] &= ~mask;
} else {
QM_SCSS_PMUX->pmux_in_en[reg] |= mask;
}
return 0;
}
qm_rc_t qm_pmux_pullup_en(qm_pin_id_t pin, bool enable)
int qm_pmux_pullup_en(const qm_pin_id_t pin, const bool enable)
{
QM_CHECK(pin < QM_PIN_ID_NUM, QM_RC_EINVAL);
QM_CHECK(pin < QM_PIN_ID_NUM, -EINVAL);
uint8_t reg = pin2reg(pin, 1);
uint8_t offs = pin2reg_offs(pin, 1);
uint32_t reg = pin_to_register(pin, 1);
uint32_t mask = MASK_1BIT << pin_to_offset(pin, 1);
enable &= MASK_1BIT;
QM_SCSS_PMUX->pmux_pullup[reg] &= ~(MASK_1BIT << offs);
QM_SCSS_PMUX->pmux_pullup[reg] |= (enable << offs);
return QM_RC_OK;
if (enable == false) {
QM_SCSS_PMUX->pmux_pullup[reg] &= ~mask;
} else {
QM_SCSS_PMUX->pmux_pullup[reg] |= mask;
}
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,9 +29,11 @@
#include "qm_pwm.h"
static void (*callback[QM_PWM_NUM])(uint32_t int_status);
static void (*callback[QM_PWM_NUM])(void *data, uint32_t int_status);
void qm_pwm_isr_0(void)
static void *callback_data[QM_PWM_NUM];
QM_ISR_DECLARE(qm_pwm_isr_0)
{
/* Which timers fired. */
uint32_t int_status = QM_PWM[QM_PWM_0].timersintstatus;
@ -39,42 +41,42 @@ void qm_pwm_isr_0(void)
QM_PWM[QM_PWM_0].timerseoi;
if (callback[QM_PWM_0]) {
(*callback[QM_PWM_0])(int_status);
(*callback[QM_PWM_0])(callback_data[QM_PWM_0], int_status);
}
QM_ISR_EOI(QM_IRQ_PWM_0_VECTOR);
}
qm_rc_t qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id)
int qm_pwm_start(const qm_pwm_t pwm, const qm_pwm_id_t id)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_PWM[pwm].timer[id].controlreg |= PWM_START;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id)
int qm_pwm_stop(const qm_pwm_t pwm, const qm_pwm_id_t id)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_PWM[pwm].timer[id].controlreg &= ~PWM_START;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
const qm_pwm_config_t *const cfg)
int qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
const qm_pwm_config_t *const cfg)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(cfg->mode <= QM_PWM_MODE_PWM, QM_RC_EINVAL);
QM_CHECK(0 < cfg->lo_count, QM_RC_EINVAL);
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
QM_CHECK(cfg->mode <= QM_PWM_MODE_PWM, -EINVAL);
QM_CHECK(0 < cfg->lo_count, -EINVAL);
/* If mode is PWM, hi_count must be > 0, otherwise don't care. */
QM_CHECK(cfg->mode == QM_PWM_MODE_PWM ? 0 != cfg->hi_count : 1,
QM_RC_EINVAL);
-EINVAL);
QM_PWM[pwm].timer[id].loadcount = cfg->lo_count - 1;
QM_PWM[pwm].timer[id].controlreg =
@ -83,59 +85,40 @@ qm_rc_t qm_pwm_set_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
/* Assign user callback function. */
callback[pwm] = cfg->callback;
callback_data[pwm] = cfg->callback_data;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pwm_get_config(const qm_pwm_t pwm, const qm_pwm_id_t id,
qm_pwm_config_t *const cfg)
int qm_pwm_set(const qm_pwm_t pwm, const qm_pwm_id_t id,
const uint32_t lo_count, const uint32_t hi_count)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->lo_count = QM_PWM[pwm].timer[id].loadcount;
cfg->mode = (QM_PWM[pwm].timer[id].controlreg & QM_PWM_CONF_MODE_MASK);
cfg->mask_interrupt =
(QM_PWM[pwm].timer[id].controlreg & QM_PWM_CONF_INT_EN_MASK) >>
QM_PWM_INTERRUPT_MASK_OFFSET;
cfg->hi_count = QM_PWM[pwm].timer_loadcount2[id];
/* Get interrupt callback function. */
cfg->callback = callback[pwm];
return QM_RC_OK;
}
qm_rc_t qm_pwm_set(const qm_pwm_t pwm, const qm_pwm_id_t id,
const uint32_t lo_count, const uint32_t hi_count)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(0 < lo_count, QM_RC_EINVAL);
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_CHECK(0 < lo_count, -EINVAL);
/* If mode is PWM, hi_count must be > 0, otherwise don't care. */
QM_CHECK(((QM_PWM[pwm].timer[id].controlreg & QM_PWM_CONF_MODE_MASK) ==
QM_PWM_MODE_PWM
? 0 < hi_count
: 1),
QM_RC_EINVAL);
-EINVAL);
QM_PWM[pwm].timer[id].loadcount = lo_count - 1;
QM_PWM[pwm].timer_loadcount2[id] = hi_count - 1;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
uint32_t *const lo_count, uint32_t *const hi_count)
int qm_pwm_get(const qm_pwm_t pwm, const qm_pwm_id_t id,
uint32_t *const lo_count, uint32_t *const hi_count)
{
QM_CHECK(pwm < QM_PWM_NUM, QM_RC_EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, QM_RC_EINVAL);
QM_CHECK(lo_count != NULL, QM_RC_EINVAL);
QM_CHECK(hi_count != NULL, QM_RC_EINVAL);
QM_CHECK(pwm < QM_PWM_NUM, -EINVAL);
QM_CHECK(id < QM_PWM_ID_NUM, -EINVAL);
QM_CHECK(lo_count != NULL, -EINVAL);
QM_CHECK(hi_count != NULL, -EINVAL);
*lo_count = QM_PWM[pwm].timer[id].loadcount;
*hi_count = QM_PWM[pwm].timer_loadcount2[id];
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,15 +29,16 @@
#include "qm_rtc.h"
static void (*callback[QM_RTC_NUM])(void);
static void (*callback[QM_RTC_NUM])(void *data);
static void *callback_data[QM_RTC_NUM];
void qm_rtc_isr_0(void)
QM_ISR_DECLARE(qm_rtc_isr_0)
{
/* Disable RTC interrupt */
QM_RTC[QM_RTC_0].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
if (callback[QM_RTC_0]) {
(callback[QM_RTC_0])();
(callback[QM_RTC_0])(callback_data[QM_RTC_0]);
}
/* clear interrupt */
@ -45,10 +46,10 @@ void qm_rtc_isr_0(void)
QM_ISR_EOI(QM_IRQ_RTC_0_VECTOR);
}
qm_rc_t qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)
int qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)
{
QM_CHECK(rtc < QM_RTC_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
/* set rtc divider */
clk_rtc_set_div(QM_RTC_DIVIDER);
@ -59,6 +60,7 @@ qm_rc_t qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)
QM_RTC[rtc].rtc_eoi;
callback[rtc] = cfg->callback;
callback_data[rtc] = cfg->callback_data;
if (cfg->alarm_en) {
qm_rtc_set_alarm(rtc, cfg->alarm_val);
@ -67,12 +69,12 @@ qm_rc_t qm_rtc_set_config(const qm_rtc_t rtc, const qm_rtc_config_t *const cfg)
QM_RTC[rtc].rtc_ccr &= ~QM_RTC_CCR_INTERRUPT_ENABLE;
}
return QM_RC_OK;
return 0;
}
qm_rc_t qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val)
int qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val)
{
QM_CHECK(rtc < QM_RTC_NUM, QM_RC_EINVAL);
QM_CHECK(rtc < QM_RTC_NUM, -EINVAL);
/* Enable RTC interrupt */
QM_RTC[rtc].rtc_ccr |= QM_RTC_CCR_INTERRUPT_ENABLE;
@ -80,18 +82,5 @@ qm_rc_t qm_rtc_set_alarm(const qm_rtc_t rtc, const uint32_t alarm_val)
/* set alarm val */
QM_RTC[rtc].rtc_cmr = alarm_val;
return QM_RC_OK;
}
qm_rc_t qm_rtc_get_config(const qm_rtc_t rtc, qm_rtc_config_t *const cfg)
{
QM_CHECK(rtc < QM_RTC_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
cfg->init_val = QM_RTC[rtc].rtc_clr;
cfg->alarm_en = (QM_RTC[rtc].rtc_ccr & QM_RTC_CCR_INTERRUPT_ENABLE);
cfg->alarm_val = QM_RTC[rtc].rtc_cmr;
cfg->callback = callback[rtc];
return QM_RC_OK;
return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -29,47 +29,42 @@
#include "qm_spi.h"
/* SPI Ctrlr0 register */
#define SPI_CTRLR0_DFS_32_MASK (0x001F0000)
#define SPI_CTRLR0_TMOD_MASK (0x00000300)
#define SPI_CTRLR0_SCPOL_SCPH_MASK (0x000000C0)
#define SPI_CTRLR0_FRF_MASK (0x00000030)
#define SPI_CTRLR0_DFS_32_OFFSET (16)
#define SPI_CTRLR0_TMOD_OFFSET (8)
#define SPI_CTRLR0_SCPOL_SCPH_OFFSET (6)
#define SPI_CTRLR0_FRF_OFFSET (4)
/* SPI Status register */
#define SPI_SR_BUSY BIT(0)
#define SPI_SR_TFNF BIT(1)
#define SPI_SR_TFE BIT(2)
/* SPI Interrupt Mask register */
#define SPI_IMR_TXEIM BIT(0)
#define SPI_IMR_TXOIM BIT(1)
#define SPI_IMR_RXUIM BIT(2)
#define SPI_IMR_RXOIM BIT(3)
#define SPI_IMR_RXFIM BIT(4)
/* SPI Interrupt Status register */
#define SPI_ISR_TXEIS BIT(0)
#define SPI_ISR_TXOIS BIT(1)
#define SPI_ISR_RXUIS BIT(2)
#define SPI_ISR_RXOIS BIT(3)
#define SPI_ISR_RXFIS BIT(4)
/* SPI Raw Interrupt Status register */
#define SPI_RISR_TXEIR BIT(0)
#define SPI_RISR_TXOIR BIT(1)
#define SPI_RISR_RXUIR BIT(2)
#define SPI_RISR_RXOIR BIT(3)
#define SPI_RISR_RXFIR BIT(4)
/* SPI FIFO size defaults */
#define SPI_DEFAULT_TX_THRESHOLD (0x05)
#define SPI_DEFAULT_RX_THRESHOLD (0x05)
#define SPI_FIFOS_DEPTH (8)
/* SPI DMA transmit watermark level. When the number of valid data entries in
* the transmit FIFO is equal to or below this field value, dma_tx_req is
* generated. The burst length has to fit in the remaining space of the transmit
* FIFO, i.e. the burst length cannot be bigger than (16 - watermark level). */
#define SPI_DMATDLR_DMATDL (0x03)
#define SPI_DMA_WRITE_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4
/* SPI DMA receive watermark level. When the number of valid data entries in the
* receive FIFO is equal to or above this field value + 1, dma_rx_req is
* generated. The burst length has to match the watermark level so that the
* exact number of data entries fit one burst, and therefore only some values
* are allowed:
* DMARDL DMA read burst length
* 0 1
* 3 4
* 7 (highest) 8
*/
#define SPI_DMARDLR_DMARDL (0x03)
#define SPI_DMA_READ_BURST_LENGTH QM_DMA_BURST_TRANS_LENGTH_4
/* Arbitrary byte sent in RX-only mode. */
#define SPI_RX_ONLY_DUMMY_BYTE (0xf0)
/* DMA transfer information, relevant on callback invocations from the DMA
* driver. */
typedef struct {
qm_spi_t spi_id; /**< SPI controller identifier. */
qm_dma_channel_id_t dma_channel_id; /**< Used DMA channel. */
volatile bool cb_pending; /**< True if waiting for DMA calllback. */
} dma_context_t;
/**
* Extern qm_spi_reg_t* array declared at qm_soc_regs.h .
*/
@ -83,16 +78,22 @@ qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM] = {
#endif
#endif
static qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
static volatile uint32_t tx_counter[QM_SPI_NUM], rx_counter[QM_SPI_NUM];
static const qm_spi_async_transfer_t *spi_async_transfer[QM_SPI_NUM];
static volatile uint16_t tx_counter[QM_SPI_NUM], rx_counter[QM_SPI_NUM];
static uint8_t dfs[QM_SPI_NUM];
static uint32_t tx_dummy_frame;
static const uint32_t tx_dummy_frame = SPI_RX_ONLY_DUMMY_BYTE;
static qm_spi_tmode_t tmode[QM_SPI_NUM];
/* DMA (memory to SPI controller) callback information. */
static dma_context_t dma_context_tx[QM_SPI_NUM];
/* DMA (SPI controller to memory) callback information. */
static dma_context_t dma_context_rx[QM_SPI_NUM];
/* DMA core being used by each SPI controller. */
static qm_dma_t dma_core[QM_SPI_NUM];
static void read_frame(const qm_spi_t spi, uint8_t *const rx_buffer)
{
const qm_spi_reg_t *const controller = QM_SPI[spi];
uint8_t frame_size = dfs[spi];
const uint8_t frame_size = dfs[spi];
if (frame_size == 1) {
*(uint8_t *)rx_buffer = controller->dr[0];
@ -103,10 +104,10 @@ static void read_frame(const qm_spi_t spi, uint8_t *const rx_buffer)
}
}
static void write_frame(const qm_spi_t spi, uint8_t *const tx_buffer)
static void write_frame(const qm_spi_t spi, const uint8_t *const tx_buffer)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
uint8_t frame_size = dfs[spi];
const uint8_t frame_size = dfs[spi];
if (frame_size == 1) {
controller->dr[0] = *(uint8_t *)tx_buffer;
@ -120,11 +121,11 @@ static void write_frame(const qm_spi_t spi, uint8_t *const tx_buffer)
static void wait_for_controller(const qm_spi_reg_t *const controller)
{
/* Page 42 of databook says you must poll TFE status waiting for 1
* before checking SPI_SR_BUSY.
* before checking QM_SPI_SR_BUSY.
*/
while (!(controller->sr & SPI_SR_TFE))
while (!(controller->sr & QM_SPI_SR_TFE))
;
while (controller->sr & SPI_SR_BUSY)
while (controller->sr & QM_SPI_SR_BUSY)
;
}
@ -134,10 +135,10 @@ static void wait_for_controller(const qm_spi_reg_t *const controller)
* @brief Interrupt based transfer on SPI.
* @param [in] spi Which SPI to transfer from.
*/
static __inline void handle_rx_interrupt(const qm_spi_t spi)
static __inline__ void handle_rx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
/* Jump to the right position of RX buffer.
* If no bytes were received before, we start from the beginning,
@ -155,8 +156,14 @@ static __inline void handle_rx_interrupt(const qm_spi_t spi)
*/
if (transfer->rx_len == rx_counter[spi]) {
controller->imr &=
~(SPI_IMR_RXUIM | SPI_IMR_RXOIM | SPI_IMR_RXFIM);
transfer->rx_callback(transfer->id, transfer->rx_len);
~(QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM);
if (transfer->callback &&
tmode[spi] == QM_SPI_TMOD_RX) {
transfer->callback(transfer->callback_data, 0,
QM_SPI_IDLE,
transfer->rx_len);
}
break;
}
}
@ -164,10 +171,8 @@ static __inline void handle_rx_interrupt(const qm_spi_t spi)
/* Check if enough data will arrive to trigger an interrupt and adjust
* rxftlr accordingly.
*/
uint32_t frames_left = transfer->rx_len - rx_counter[spi];
if (!frames_left) {
controller->rxftlr = SPI_DEFAULT_RX_THRESHOLD;
} else if (frames_left <= controller->rxftlr) {
const uint32_t frames_left = transfer->rx_len - rx_counter[spi];
if (frames_left <= controller->rxftlr) {
controller->rxftlr = frames_left - 1;
}
}
@ -178,16 +183,16 @@ static __inline void handle_rx_interrupt(const qm_spi_t spi)
* @brief Interrupt based transfer on SPI.
* @param [in] spi Which SPI to transfer to.
*/
static __inline void handle_tx_interrupt(const qm_spi_t spi)
static __inline__ void handle_tx_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
/* Jump to the right position of TX buffer.
* If no bytes were transmitted before, we start from the beginning,
* otherwise we jump to the next frame to be sent.
*/
uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);
int frames =
SPI_FIFOS_DEPTH - controller->txflr - controller->rxflr - 1;
@ -208,57 +213,61 @@ static __inline void handle_tx_interrupt(const qm_spi_t spi)
static void handle_spi_interrupt(const qm_spi_t spi)
{
qm_spi_reg_t *const controller = QM_SPI[spi];
qm_spi_async_transfer_t *transfer = spi_async_transfer[spi];
uint32_t int_status = controller->isr;
const qm_spi_async_transfer_t *transfer = spi_async_transfer[spi];
const uint32_t int_status = controller->isr;
QM_ASSERT((int_status & (QM_SPI_ISR_TXOIS | QM_SPI_ISR_RXUIS)) == 0);
if (int_status & QM_SPI_ISR_RXOIS) {
if (transfer->callback) {
transfer->callback(transfer->callback_data, -EIO,
QM_SPI_RX_OVERFLOW,
rx_counter[spi]);
}
QM_ASSERT((int_status & (SPI_ISR_TXOIS | SPI_ISR_RXUIS)) == 0);
if (int_status & SPI_ISR_RXOIS) {
transfer->err_callback(transfer->id, QM_RC_SPI_RX_OE);
controller->rxoicr;
controller->imr = 0;
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0;
return;
}
if (int_status & SPI_ISR_RXFIS) {
if (int_status & QM_SPI_ISR_RXFIS) {
handle_rx_interrupt(spi);
}
if (transfer->rx_len == rx_counter[spi] &&
transfer->tx_len == tx_counter[spi] &&
(controller->sr & SPI_SR_TFE) && !(controller->sr & SPI_SR_BUSY)) {
controller->txftlr = SPI_DEFAULT_TX_THRESHOLD;
controller->imr = 0;
(controller->sr & QM_SPI_SR_TFE) &&
!(controller->sr & QM_SPI_SR_BUSY)) {
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0;
if (tmode[spi] != QM_SPI_TMOD_RX) {
transfer->tx_callback(transfer->id, transfer->tx_len);
if (transfer->callback && tmode[spi] != QM_SPI_TMOD_RX) {
transfer->callback(transfer->callback_data, 0,
QM_SPI_IDLE, transfer->tx_len);
}
return;
}
if (int_status & SPI_ISR_TXEIS && transfer->tx_len > tx_counter[spi]) {
if (int_status & QM_SPI_ISR_TXEIS &&
transfer->tx_len > tx_counter[spi]) {
handle_tx_interrupt(spi);
}
}
qm_rc_t qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
int qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(cfg, QM_RC_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(cfg, -EINVAL);
QM_ASSERT(QM_SPI[spi]->ssienr == 0);
qm_spi_reg_t *const controller = QM_SPI[spi];
/* Apply the selected cfg options */
controller->ctrlr0 = (cfg->frame_size << SPI_CTRLR0_DFS_32_OFFSET) |
(cfg->transfer_mode << SPI_CTRLR0_TMOD_OFFSET) |
(cfg->bus_mode << SPI_CTRLR0_SCPOL_SCPH_OFFSET);
controller->txftlr = SPI_DEFAULT_TX_THRESHOLD;
controller->rxftlr = SPI_DEFAULT_RX_THRESHOLD;
controller->ctrlr0 = (cfg->frame_size << QM_SPI_CTRLR0_DFS_32_OFFSET) |
(cfg->transfer_mode << QM_SPI_CTRLR0_TMOD_OFFSET) |
(cfg->bus_mode << QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET);
controller->baudr = cfg->clk_divider;
@ -272,66 +281,62 @@ qm_rc_t qm_spi_set_config(const qm_spi_t spi, const qm_spi_config_t *cfg)
tmode[spi] = cfg->transfer_mode;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_spi_get_config(const qm_spi_t spi, qm_spi_config_t *const cfg)
int qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(cfg, QM_RC_EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
cfg->transfer_mode = (controller->ctrlr0 & SPI_CTRLR0_TMOD_MASK) >>
SPI_CTRLR0_TMOD_OFFSET;
cfg->bus_mode = (controller->ctrlr0 & SPI_CTRLR0_SCPOL_SCPH_MASK) >>
SPI_CTRLR0_SCPOL_SCPH_OFFSET;
cfg->frame_size = (controller->ctrlr0 & SPI_CTRLR0_DFS_32_MASK) >>
SPI_CTRLR0_DFS_32_OFFSET;
cfg->clk_divider = controller->baudr;
return QM_RC_OK;
}
qm_rc_t qm_spi_slave_select(const qm_spi_t spi, const qm_spi_slave_select_t ss)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
/* Check if the device reports as busy. */
QM_ASSERT(!(QM_SPI[spi]->sr & SPI_SR_BUSY));
QM_ASSERT(!(QM_SPI[spi]->sr & QM_SPI_SR_BUSY));
QM_SPI[spi]->ser = ss;
return QM_RC_OK;
return 0;
}
qm_spi_status_t qm_spi_get_status(const qm_spi_t spi)
int qm_spi_get_status(const qm_spi_t spi, qm_spi_status_t *const status)
{
QM_CHECK(spi < QM_SPI_NUM, QM_SPI_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(status, -EINVAL);
if (QM_SPI[spi]->sr & SPI_SR_BUSY) {
return QM_SPI_BUSY;
qm_spi_reg_t *const controller = QM_SPI[spi];
if (controller->sr & QM_SPI_SR_BUSY) {
*status = QM_SPI_BUSY;
} else {
return QM_SPI_FREE;
*status = QM_SPI_IDLE;
}
if (controller->risr & QM_SPI_RISR_RXOIR) {
*status = QM_SPI_RX_OVERFLOW;
}
return 0;
}
qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer)
int qm_spi_transfer(const qm_spi_t spi, const qm_spi_transfer_t *const xfer,
qm_spi_status_t *const status)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(xfer, QM_RC_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(xfer, -EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
? (xfer->tx_len == xfer->rx_len)
: 1,
QM_RC_EINVAL);
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
QM_RC_EINVAL);
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1,
QM_RC_EINVAL);
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
? (xfer->tx_len && xfer->rx_len)
: 1,
-EINVAL);
uint32_t i_tx = xfer->tx_len;
uint32_t i_rx = xfer->rx_len;
qm_rc_t rc = QM_RC_OK;
int rc = 0;
qm_spi_reg_t *const controller = QM_SPI[spi];
@ -339,7 +344,7 @@ qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer)
wait_for_controller(controller);
/* Mask all interrupts, this is a blocking function. */
controller->imr = 0;
controller->imr = QM_SPI_IMR_MASK_ALL;
/* If we are in RX only or EEPROM Read mode, the ctrlr1 reg holds how
* many bytes the controller solicits, minus 1. */
@ -348,13 +353,13 @@ qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer)
}
/* Enable SPI device */
controller->ssienr = 1;
controller->ssienr = QM_SPI_SSIENR_SSIENR;
/* Transfer is only complete when all the tx data is sent and all
* expected rx data has been received.
*/
uint8_t *rx_buffer = xfer->rx;
uint8_t *tx_buffer = xfer->tx;
const uint8_t *tx_buffer = xfer->tx;
int frames;
@ -362,13 +367,16 @@ qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer)
* This is covered by the databook on page 42.
*/
if (tmode[spi] == QM_SPI_TMOD_RX) {
tx_buffer = (uint8_t*)&tx_dummy_frame;
tx_buffer = (uint8_t *)&tx_dummy_frame;
i_tx = 1;
}
while (i_tx || i_rx) {
if (controller->risr & SPI_RISR_RXOIR) {
rc = QM_RC_SPI_RX_OE;
if (controller->risr & QM_SPI_RISR_RXOIR) {
rc = -EIO;
if (status) {
*status |= QM_SPI_RX_OVERFLOW;
}
controller->rxoicr;
break;
}
@ -404,28 +412,23 @@ qm_rc_t qm_spi_transfer(const qm_spi_t spi, qm_spi_transfer_t *const xfer)
return rc;
}
qm_rc_t qm_spi_irq_transfer(const qm_spi_t spi,
qm_spi_async_transfer_t *const xfer)
int qm_spi_irq_transfer(const qm_spi_t spi,
const qm_spi_async_transfer_t *const xfer)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(xfer, QM_RC_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
QM_CHECK(xfer, -EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
? (xfer->tx_len == xfer->rx_len)
: 1,
QM_RC_EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
? (xfer->tx_callback && xfer->rx_callback)
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1,
-EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
? (xfer->tx_len && xfer->rx_len)
: 1,
QM_RC_EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX
? (xfer->tx_callback && (xfer->rx_len == 0))
: 1,
QM_RC_EINVAL);
QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX
? (xfer->rx_callback && (xfer->tx_len == 0))
: 1,
QM_RC_EINVAL);
QM_CHECK(xfer->err_callback, QM_RC_EINVAL);
-EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
@ -440,6 +443,7 @@ qm_rc_t qm_spi_irq_transfer(const qm_spi_t spi,
? xfer->rx_len - 1
: SPI_DEFAULT_RX_THRESHOLD;
}
controller->txftlr = SPI_DEFAULT_TX_THRESHOLD;
spi_async_transfer[spi] = xfer;
tx_counter[spi] = 0;
@ -447,58 +451,426 @@ qm_rc_t qm_spi_irq_transfer(const qm_spi_t spi,
/* Unmask interrupts */
if (tmode[spi] == QM_SPI_TMOD_TX) {
controller->imr = SPI_IMR_TXEIM | SPI_IMR_TXOIM;
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
} else if (tmode[spi] == QM_SPI_TMOD_RX) {
controller->imr = SPI_IMR_RXUIM | SPI_IMR_RXOIM |
SPI_IMR_RXFIM;
controller->ssienr = 1;
write_frame(spi, (uint8_t*)&tx_dummy_frame);
controller->imr =
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
controller->ssienr = QM_SPI_SSIENR_SSIENR;
write_frame(spi, (uint8_t *)&tx_dummy_frame);
} else {
controller->imr = SPI_IMR_TXEIM | SPI_IMR_TXOIM |
SPI_IMR_RXUIM | SPI_IMR_RXOIM |
SPI_IMR_RXFIM;
controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM |
QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM |
QM_SPI_IMR_RXFIM;
}
controller->ssienr = 1; /** Enable SPI Device */
controller->ssienr = QM_SPI_SSIENR_SSIENR; /** Enable SPI Device */
return QM_RC_OK;
return 0;
}
void qm_spi_master_0_isr(void)
QM_ISR_DECLARE(qm_spi_master_0_isr)
{
handle_spi_interrupt(QM_SPI_MST_0);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_0_VECTOR);
}
#if (QUARK_SE)
void qm_spi_master_1_isr(void)
QM_ISR_DECLARE(qm_spi_master_1_isr)
{
handle_spi_interrupt(QM_SPI_MST_1);
QM_ISR_EOI(QM_IRQ_SPI_MASTER_1_VECTOR);
}
#endif
qm_rc_t qm_spi_transfer_terminate(const qm_spi_t spi)
int qm_spi_irq_transfer_terminate(const qm_spi_t spi)
{
QM_CHECK(spi < QM_SPI_NUM, QM_RC_EINVAL);
QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
qm_spi_reg_t *const controller = QM_SPI[spi];
qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
/* Mask the interrupts */
controller->imr = 0;
controller->imr = QM_SPI_IMR_MASK_ALL;
controller->ssienr = 0; /** Disable SPI device */
if (transfer->tx_callback != NULL) {
transfer->tx_callback(transfer->id, tx_counter[spi]);
}
if (transfer->callback) {
uint16_t len = 0;
if (tmode[spi] == QM_SPI_TMOD_TX ||
tmode[spi] == QM_SPI_TMOD_TX_RX) {
len = tx_counter[spi];
if (transfer->rx_callback != NULL) {
transfer->rx_callback(transfer->id, rx_counter[spi]);
} else {
len = rx_counter[spi];
}
/*
* NOTE: change this to return controller-specific code
* 'user aborted'.
*/
transfer->callback(transfer->callback_data, -ECANCELED,
QM_SPI_IDLE, len);
}
tx_counter[spi] = 0;
rx_counter[spi] = 0;
return QM_RC_OK;
return 0;
}
/*
 * DMA driver invoked callback.
 *
 * Called by the DMA core on completion (or failure) of either the TX or the
 * RX channel of an SPI transfer.  `callback_context` is the dma_context_t
 * registered in qm_spi_dma_channel_config, `len` is the number of BYTES
 * moved by the DMA and `error_code` is the DMA driver's error (0 on
 * success).  The user callback is invoked exactly once per transfer: for a
 * full-duplex transfer the first channel to finish only clears its pending
 * flag and returns; the second one tears down the controller and reports.
 */
static void spi_dma_callback(void *callback_context, uint32_t len,
			     int error_code)
{
	QM_ASSERT(callback_context);

	int client_error = 0;
	uint32_t frames_expected;
	volatile bool *cb_pending_alternate_p;

	/* The DMA driver returns a pointer to a dma_context struct from which
	 * we find out the corresponding SPI device and transfer direction. */
	dma_context_t *const dma_context_p = callback_context;
	const qm_spi_t spi = dma_context_p->spi_id;
	QM_ASSERT(spi < QM_SPI_NUM);
	qm_spi_reg_t *const controller = QM_SPI[spi];
	const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];
	QM_ASSERT(transfer);
	const uint8_t frame_size = dfs[spi];
	QM_ASSERT((frame_size == 1) || (frame_size == 2) || (frame_size == 4));

	/* DMA driver returns length in bytes but user expects number of
	 * frames. */
	const uint32_t frames_transfered = len / frame_size;

	QM_ASSERT((dma_context_p == &dma_context_tx[spi]) ||
		  (dma_context_p == &dma_context_rx[spi]));
	if (dma_context_p == &dma_context_tx[spi]) {
		/* TX transfer. */
		frames_expected = transfer->tx_len;
		cb_pending_alternate_p = &dma_context_rx[spi].cb_pending;
	} else if (dma_context_p == &dma_context_rx[spi]) {
		/* RX transfer. */
		frames_expected = transfer->rx_len;
		cb_pending_alternate_p = &dma_context_tx[spi].cb_pending;
	} else {
		/* Unknown context, nothing sane to do. */
		return;
	}

	QM_ASSERT(cb_pending_alternate_p);
	QM_ASSERT(dma_context_p->cb_pending);
	dma_context_p->cb_pending = false;

	if (error_code) {
		/* Transfer failed, pass to client the error code returned by
		 * the DMA driver. */
		client_error = error_code;
	} else if (false == *cb_pending_alternate_p) {
		/* Both directions are done now.  TX transfers invoke the
		 * callback before the TX data has been transmitted, we need
		 * to wait here. */
		wait_for_controller(controller);

		if (frames_transfered != frames_expected) {
			QM_ASSERT(frames_transfered < frames_expected);
			/* Callback triggered through a transfer terminate. */
			client_error = -ECANCELED;
		}
	} else {
		/* Controller busy due to alternate DMA channel active: defer
		 * teardown and user notification to that channel's callback. */
		return;
	}

	/* Disable DMA setting and SPI controller. */
	controller->dmacr = 0;
	controller->ssienr = 0;

	if (transfer->callback) {
		transfer->callback(transfer->callback_data, client_error,
				   QM_SPI_IDLE, frames_transfered);
	}
}
/*
 * Configure a DMA channel for use by an SPI controller.
 *
 * Builds a qm_dma_channel_config_t matching the SPI frame size (set by a
 * previous qm_spi_set_config call) and the requested direction, registers
 * spi_dma_callback as the DMA client callback, and stores the channel/core
 * ids for later use by qm_spi_dma_transfer.
 *
 * Returns 0 on success, -EINVAL on bad arguments or unsupported frame
 * size/direction, or the error returned by qm_dma_channel_set_config.
 */
int qm_spi_dma_channel_config(
    const qm_spi_t spi, const qm_dma_t dma_ctrl_id,
    const qm_dma_channel_id_t dma_channel_id,
    const qm_dma_channel_direction_t dma_channel_direction)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(dma_ctrl_id < QM_DMA_NUM, -EINVAL);
	QM_CHECK(dma_channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);

	int ret = -EINVAL;
	dma_context_t *dma_context_p = NULL;
	qm_dma_channel_config_t dma_chan_cfg = {0};
	dma_chan_cfg.handshake_polarity = QM_DMA_HANDSHAKE_POLARITY_HIGH;
	dma_chan_cfg.channel_direction = dma_channel_direction;
	dma_chan_cfg.client_callback = spi_dma_callback;

	/* Every data transfer performed by the DMA core corresponds to an SPI
	 * data frame, the SPI uses the number of bits determined by a previous
	 * qm_spi_set_config call where the frame size was specified. */
	switch (dfs[spi]) {
	case 1:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
		break;
	case 2:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_16;
		break;
	case 4:
		dma_chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_32;
		break;
	default:
		/* The DMA core cannot handle 3 byte frame sizes. */
		return -EINVAL;
	}
	dma_chan_cfg.destination_transfer_width =
	    dma_chan_cfg.source_transfer_width;

	switch (dma_channel_direction) {
	case QM_DMA_MEMORY_TO_PERIPHERAL:
		/* TX direction: pick the TX handshake interface for this
		 * master. */
		switch (spi) {
		case QM_SPI_MST_0:
			dma_chan_cfg.handshake_interface =
			    DMA_HW_IF_SPI_MASTER_0_TX;
			break;
#if (QUARK_SE)
		case QM_SPI_MST_1:
			dma_chan_cfg.handshake_interface =
			    DMA_HW_IF_SPI_MASTER_1_TX;
			break;
#endif
		default:
			/* Slave SPI is not supported. */
			return -EINVAL;
		}

		/* The DMA burst length has to fit in the space remaining in
		 * the TX FIFO after the watermark level, DMATDLR. */
		dma_chan_cfg.source_burst_length = SPI_DMA_WRITE_BURST_LENGTH;
		dma_chan_cfg.destination_burst_length =
		    SPI_DMA_WRITE_BURST_LENGTH;

		dma_context_p = &dma_context_tx[spi];
		break;
	case QM_DMA_PERIPHERAL_TO_MEMORY:
		/* RX direction: pick the RX handshake interface for this
		 * master. */
		switch (spi) {
		case QM_SPI_MST_0:
			dma_chan_cfg.handshake_interface =
			    DMA_HW_IF_SPI_MASTER_0_RX;
			break;
#if (QUARK_SE)
		case QM_SPI_MST_1:
			dma_chan_cfg.handshake_interface =
			    DMA_HW_IF_SPI_MASTER_1_RX;
			break;
#endif
		default:
			/* Slave SPI is not supported. */
			return -EINVAL;
		}

		/* The DMA burst length has to match the value of the receive
		 * watermark level, DMARDLR + 1. */
		dma_chan_cfg.source_burst_length = SPI_DMA_READ_BURST_LENGTH;
		dma_chan_cfg.destination_burst_length =
		    SPI_DMA_READ_BURST_LENGTH;

		dma_context_p = &dma_context_rx[spi];
		break;
	default:
		/* Memory to memory not allowed on SPI transfers. */
		return -EINVAL;
	}

	/* The DMA driver needs a pointer to the client callback function so
	 * that later we can identify to which SPI controller the DMA callback
	 * corresponds to as well as whether we are dealing with a TX or RX
	 * dma_context struct. */
	QM_ASSERT(dma_context_p);
	dma_chan_cfg.callback_context = dma_context_p;

	ret = qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id,
					&dma_chan_cfg);
	if (ret) {
		return ret;
	}

	/* To be used on received DMA callback. */
	dma_context_p->spi_id = spi;
	dma_context_p->dma_channel_id = dma_channel_id;

	/* To be used on transfer setup. */
	dma_core[spi] = dma_ctrl_id;

	return 0;
}
/*
 * Start a DMA-based SPI transfer.
 *
 * Validates the transfer against the configured transfer mode, programs the
 * DMA source/destination for the RX and/or TX channel, then enables the SPI
 * device and DMA requests.  Completion (and teardown of dmacr/ssienr) is
 * handled in spi_dma_callback; xfer->callback is invoked from there.
 *
 * NOTE: register write order below matters (imr -> ctrlr1 -> ssienr ->
 * dmacr/thresholds -> dummy frame) and must be preserved.
 *
 * Returns 0 on success, -EINVAL on argument/mode mismatch, or the error
 * returned by the DMA driver (in which case the controller is disabled or,
 * if RX was already running, terminated via the DMA callback path).
 */
int qm_spi_dma_transfer(const qm_spi_t spi,
			const qm_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	/* A direction is only usable if a buffer is set and its DMA channel
	 * was configured beforehand. */
	QM_CHECK(xfer->tx_len
		     ? (xfer->tx &&
			dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(xfer->rx_len
		     ? (xfer->rx &&
			dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	/* Buffer/length combinations must agree with the transfer mode set at
	 * config time. */
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX ? (xfer->tx && xfer->rx) : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX
		     ? (xfer->tx_len == xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->tx_len && !xfer->rx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->rx_len && !xfer->tx_len)
					      : 1,
		 -EINVAL);
	QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ
		     ? (xfer->tx_len && xfer->rx_len)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_core[spi] < QM_DMA_NUM, -EINVAL);

	int ret;
	qm_dma_transfer_t dma_trans = {0};
	qm_spi_reg_t *const controller = QM_SPI[spi];
	QM_ASSERT(0 == controller->ssienr);

	/* Mask interrupts. */
	controller->imr = QM_SPI_IMR_MASK_ALL;

	if (xfer->rx_len) {
		/* RX: data register -> user buffer. */
		dma_trans.block_size = xfer->rx_len;
		dma_trans.source_address = (uint32_t *)&controller->dr[0];
		dma_trans.destination_address = (uint32_t *)xfer->rx;
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_rx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}

		/* In RX-only or EEPROM mode, the ctrlr1 register holds how
		 * many data frames the controller solicits, minus 1. */
		controller->ctrlr1 = xfer->rx_len - 1;
	}

	if (xfer->tx_len) {
		/* TX: user buffer -> data register. */
		dma_trans.block_size = xfer->tx_len;
		dma_trans.source_address = (uint32_t *)xfer->tx;
		dma_trans.destination_address = (uint32_t *)&controller->dr[0];
		ret = qm_dma_transfer_set_config(
		    dma_core[spi], dma_context_tx[spi].dma_channel_id,
		    &dma_trans);
		if (ret) {
			return ret;
		}
	}

	/* Transfer pointer kept to extract user callback address and transfer
	 * client id when DMA completes. */
	spi_async_transfer[spi] = xfer;

	/* Enable the SPI device. */
	controller->ssienr = QM_SPI_SSIENR_SSIENR;

	if (xfer->rx_len) {
		/* Enable receive DMA. */
		controller->dmacr |= QM_SPI_DMACR_RDMAE;

		/* Set the DMA receive threshold. */
		controller->dmardlr = SPI_DMARDLR_DMARDL;

		dma_context_rx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_rx[spi].dma_channel_id);
		if (ret) {
			dma_context_rx[spi].cb_pending = false;

			/* Disable DMA setting and SPI controller. */
			controller->dmacr = 0;
			controller->ssienr = 0;
			return ret;
		}

		if (!xfer->tx_len) {
			/* In RX-only mode we need to transfer an initial dummy
			 * byte. */
			write_frame(spi, (uint8_t *)&tx_dummy_frame);
		}
	}

	if (xfer->tx_len) {
		/* Enable transmit DMA. */
		controller->dmacr |= QM_SPI_DMACR_TDMAE;

		/* Set the DMA transmit threshold. */
		controller->dmatdlr = SPI_DMATDLR_DMATDL;

		dma_context_tx[spi].cb_pending = true;

		ret = qm_dma_transfer_start(dma_core[spi],
					    dma_context_tx[spi].dma_channel_id);
		if (ret) {
			dma_context_tx[spi].cb_pending = false;
			if (xfer->rx_len) {
				/* If a RX transfer was previously started, we
				 * need to stop it - the SPI device will be
				 * disabled when handling the DMA callback. */
				qm_spi_dma_transfer_terminate(spi);
			} else {
				/* Disable DMA setting and SPI controller. */
				controller->dmacr = 0;
				controller->ssienr = 0;
			}
			return ret;
		}
	}

	return 0;
}
/*
 * Abort an ongoing DMA-based SPI transfer.
 *
 * Requests termination of each DMA channel (TX first, then RX) that still
 * has a callback pending; the SPI controller itself is shut down and the
 * user notified from the resulting DMA callback.
 *
 * Returns 0 on success, -EINVAL on bad arguments or unconfigured pending
 * channel, -EIO if the DMA driver failed to terminate a channel.
 */
int qm_spi_dma_transfer_terminate(qm_spi_t spi)
{
	QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
	QM_CHECK(dma_context_tx[spi].cb_pending
		     ? (dma_context_tx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);
	QM_CHECK(dma_context_rx[spi].cb_pending
		     ? (dma_context_rx[spi].dma_channel_id < QM_DMA_CHANNEL_NUM)
		     : 1,
		 -EINVAL);

	int rc = 0;
	unsigned int idx;
	/* Terminate TX before RX, matching the original request order. */
	dma_context_t *const contexts[] = {&dma_context_tx[spi],
					   &dma_context_rx[spi]};

	for (idx = 0; idx < 2; idx++) {
		if (!contexts[idx]->cb_pending) {
			continue;
		}
		if (qm_dma_transfer_terminate(
			dma_core[spi], contexts[idx]->dma_channel_id)) {
			/* Flag the failure but still try the other channel. */
			rc = -EIO;
		}
	}

	return rc;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,37 +27,70 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <errno.h>
#include "qm_uart.h"
/* 1/2 fifo full in RX, 1/2 fifo full in TX. */
#define QM_UART_DEFAULT_TX_RX_THRESHOLD (0xB0)
#define QM_UART_TX_0_RX_1_2_THRESHOLD (0x80)
#ifndef UNIT_TEST
qm_uart_reg_t *qm_uart[QM_UART_NUM] = {(qm_uart_reg_t *)QM_UART_0_BASE,
(qm_uart_reg_t *)QM_UART_1_BASE};
#endif
typedef void (*uart_client_callback_t)(void *data, int error,
qm_uart_status_t status, uint32_t len);
/**
* DMA transfer information, relevant on callback invocations from the DMA
* driver.
*/
typedef struct {
qm_dma_channel_id_t dma_channel_id; /**< DMA channel. */
const qm_uart_transfer_t *xfer; /**< User transfer structure. */
} dma_context_t;
/* UART Callback pointers. */
static void (*uart_write_callback[QM_UART_NUM])(uint32_t id, uint32_t len);
static void (*uart_read_callback[QM_UART_NUM])(uint32_t id, uint32_t len);
static void (*uart_write_err_callback[QM_UART_NUM])(uint32_t id,
qm_uart_status_t status);
static void (*uart_read_err_callback[QM_UART_NUM])(uint32_t id,
qm_uart_status_t status);
static uart_client_callback_t write_callback[QM_UART_NUM];
static uart_client_callback_t read_callback[QM_UART_NUM];
/* Txfer transaction ids. */
static uint32_t uart_write_id[QM_UART_NUM], uart_read_id[QM_UART_NUM];
/* User callback data. */
static void *write_data[QM_UART_NUM], *read_data[QM_UART_NUM];
/* Buffer pointers to store transmit / receive data for UART */
static uint8_t *uart_write_buffer[QM_UART_NUM], *uart_read_buffer[QM_UART_NUM];
static uint32_t uart_write_pos[QM_UART_NUM], uart_write_remaining[QM_UART_NUM];
static uint32_t uart_read_pos[QM_UART_NUM], uart_read_remaining[QM_UART_NUM];
static uint8_t *write_buffer[QM_UART_NUM], *read_buffer[QM_UART_NUM];
static uint32_t write_pos[QM_UART_NUM], write_len[QM_UART_NUM];
static uint32_t read_pos[QM_UART_NUM], read_len[QM_UART_NUM];
/* DMA (memory to UART) callback information. */
static dma_context_t dma_context_tx[QM_UART_NUM];
/* DMA (UART to memory) callback information. */
static dma_context_t dma_context_rx[QM_UART_NUM];
/* DMA core being used by each UART. */
static qm_dma_t dma_core[QM_UART_NUM];
static bool is_read_xfer_complete(const qm_uart_t uart)
{
return read_pos[uart] >= read_len[uart];
}
static bool is_write_xfer_complete(const qm_uart_t uart)
{
return write_pos[uart] >= write_len[uart];
}
static void qm_uart_isr_handler(const qm_uart_t uart)
{
uint32_t lsr;
uint8_t interrupt_id = QM_UART[uart].iir_fcr & QM_UART_IIR_IID_MASK;
qm_uart_reg_t *const regs = QM_UART[uart];
uint8_t interrupt_id = regs->iir_fcr & QM_UART_IIR_IID_MASK;
/* Is the transmit holding empty? */
if (interrupt_id == QM_UART_IIR_THR_EMPTY) {
if (!(uart_write_remaining[uart])) {
QM_UART[uart].ier_dlh &= ~QM_UART_IER_ETBEI;
/*
* Interrupt ID priority levels (from highest to lowest):
* 1: QM_UART_IIR_RECV_LINE_STATUS
* 2: QM_UART_IIR_RECV_DATA_AVAIL and QM_UART_IIR_CHAR_TIMEOUT
* 3: QM_UART_IIR_THR_EMPTY
*/
switch (interrupt_id) {
case QM_UART_IIR_THR_EMPTY:
if (is_write_xfer_complete(uart)) {
regs->ier_dlh &= ~QM_UART_IER_ETBEI;
/*
* At this point the FIFOs are empty, but the shift
* register still is transmitting the last 8 bits. So if
@ -65,205 +98,243 @@ static void qm_uart_isr_handler(const qm_uart_t uart)
* busy. Use the SCR Bit 0 to indicate an irq tx is
* complete.
*/
QM_UART[uart].scr |= BIT(0);
uart_write_callback[uart](uart_write_id[uart],
uart_write_pos[uart]);
regs->scr |= BIT(0);
if (write_callback[uart]) {
write_callback[uart](write_data[uart], 0,
QM_UART_IDLE,
write_pos[uart]);
}
return;
}
uint32_t i =
uart_write_remaining[uart] >= QM_UART_FIFO_HALF_DEPTH
? QM_UART_FIFO_HALF_DEPTH
: uart_write_remaining[uart];
while (i--) {
QM_UART[uart].rbr_thr_dll =
uart_write_buffer[uart][uart_write_pos[uart]++];
uart_write_remaining[uart]--;
/*
* If we are starting the transfer then the TX FIFO is empty.
* In that case we set 'count' variable to QM_UART_FIFO_DEPTH
* in order to take advantage of the whole FIFO capacity.
*/
int count = (write_pos[uart] == 0) ? QM_UART_FIFO_DEPTH
: QM_UART_FIFO_HALF_DEPTH;
while (count-- && !is_write_xfer_complete(uart)) {
regs->rbr_thr_dll =
write_buffer[uart][write_pos[uart]++];
}
/*
* Change the threshold level to trigger an interrupt when the
* TX buffer is empty.
*/
if (!(uart_write_remaining[uart])) {
QM_UART[uart].iir_fcr =
QM_UART_TX_0_RX_1_2_THRESHOLD | QM_UART_FCR_FIFOE;
}
}
/* Read any bytes on the line. */
lsr = QM_UART[uart].lsr &
(QM_UART_LSR_ERROR_BITS | QM_UART_LSR_DR | QM_UART_LSR_RFE);
while (lsr) {
/* If there's an error, tell the application. */
if (lsr & QM_UART_LSR_ERROR_BITS) {
uart_read_err_callback[uart](
uart_read_id[uart], lsr & QM_UART_LSR_ERROR_BITS);
if (is_write_xfer_complete(uart)) {
regs->iir_fcr = QM_UART_FCR_TX_0_RX_1_2_THRESHOLD |
QM_UART_FCR_FIFOE;
}
if ((lsr & QM_UART_LSR_DR) && uart_read_remaining[uart]) {
uart_read_buffer[uart][uart_read_pos[uart]++] =
QM_UART[uart].rbr_thr_dll;
uart_read_remaining[uart]--;
if (!(uart_read_remaining[uart])) {
/* Disable receive interrupts */
QM_UART[uart].ier_dlh &= ~QM_UART_IER_ERBFI;
uart_read_callback[uart](uart_read_id[uart],
uart_read_pos[uart]);
break;
/* It might have more data available in the
* RX FIFO which belongs to a subsequent
* transfer. So, since this read transfer
* has completed, should stop reading the
* LSR otherwise we might loop forever.
*/
case QM_UART_IIR_CHAR_TIMEOUT:
case QM_UART_IIR_RECV_DATA_AVAIL:
/*
* Copy data from RX FIFO to xfer buffer as long as the xfer
* has not completed and we have data in the RX FIFO.
*/
while (!is_read_xfer_complete(uart)) {
uint32_t lsr = regs->lsr;
/*
* A break condition may cause a line status interrupt
* to follow very closely after a char timeout
* interrupt, but reading the lsr effectively clears the
* pending interrupts so we issue here the callback
* instead, otherwise we would miss it.
* NOTE: Returned len is 0 for now, this might change
* in the future.
*/
if (lsr & QM_UART_LSR_ERROR_BITS) {
if (read_callback[uart]) {
read_callback[uart](
read_data[uart], -EIO,
lsr & QM_UART_LSR_ERROR_BITS, 0);
}
}
if (lsr & QM_UART_LSR_DR) {
read_buffer[uart][read_pos[uart]++] =
regs->rbr_thr_dll;
} else {
/* No more data in the RX FIFO */
break;
}
}
lsr = QM_UART[uart].lsr & (QM_UART_LSR_ERROR_BITS |
QM_UART_LSR_DR | QM_UART_LSR_RFE);
if (is_read_xfer_complete(uart)) {
/*
* Disable both 'Receiver Data Available' and
* 'Receiver Line Status' interrupts.
*/
regs->ier_dlh &=
~(QM_UART_IER_ERBFI | QM_UART_IER_ELSI);
if (read_callback[uart]) {
read_callback[uart](read_data[uart], 0,
QM_UART_IDLE,
read_pos[uart]);
}
}
break;
case QM_UART_IIR_RECV_LINE_STATUS:
if (read_callback[uart]) {
/*
* NOTE: Returned len is 0 for now, this might change
* in the future.
*/
read_callback[uart](read_data[uart], -EIO,
regs->lsr & QM_UART_LSR_ERROR_BITS,
0);
}
break;
}
}
void qm_uart_0_isr(void)
QM_ISR_DECLARE(qm_uart_0_isr)
{
qm_uart_isr_handler(QM_UART_0);
QM_ISR_EOI(QM_IRQ_UART_0_VECTOR);
}
void qm_uart_1_isr(void)
QM_ISR_DECLARE(qm_uart_1_isr)
{
qm_uart_isr_handler(QM_UART_1);
QM_ISR_EOI(QM_IRQ_UART_1_VECTOR);
}
qm_rc_t qm_uart_set_config(const qm_uart_t uart, const qm_uart_config_t *cfg)
int qm_uart_set_config(const qm_uart_t uart, const qm_uart_config_t *cfg)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
qm_uart_reg_t *const regs = QM_UART[uart];
/* Clear DLAB by unsetting line parameters */
QM_UART[uart].lcr = 0;
regs->lcr = 0;
/* Set divisor latch registers (integer + fractional part) */
QM_UART[uart].lcr = QM_UART_LCR_DLAB;
QM_UART[uart].ier_dlh = QM_UART_CFG_BAUD_DLH_UNPACK(cfg->baud_divisor);
QM_UART[uart].rbr_thr_dll =
QM_UART_CFG_BAUD_DLL_UNPACK(cfg->baud_divisor);
QM_UART[uart].dlf = QM_UART_CFG_BAUD_DLF_UNPACK(cfg->baud_divisor);
regs->lcr = QM_UART_LCR_DLAB;
regs->ier_dlh = QM_UART_CFG_BAUD_DLH_UNPACK(cfg->baud_divisor);
regs->rbr_thr_dll = QM_UART_CFG_BAUD_DLL_UNPACK(cfg->baud_divisor);
regs->dlf = QM_UART_CFG_BAUD_DLF_UNPACK(cfg->baud_divisor);
/* Set line parameters. This also unsets the DLAB */
QM_UART[uart].lcr = cfg->line_control;
regs->lcr = cfg->line_control;
/* Hardware automatic flow control */
QM_UART[uart].mcr = 0;
regs->mcr = 0;
if (true == cfg->hw_fc) {
QM_UART[uart].mcr |= QM_UART_MCR_AFCE | QM_UART_MCR_RTS;
regs->mcr |= QM_UART_MCR_AFCE | QM_UART_MCR_RTS;
}
/* FIFO's enable and reset, set interrupt threshold */
QM_UART[uart].iir_fcr =
regs->iir_fcr =
(QM_UART_FCR_FIFOE | QM_UART_FCR_RFIFOR | QM_UART_FCR_XFIFOR |
QM_UART_DEFAULT_TX_RX_THRESHOLD);
QM_UART[uart].ier_dlh |= QM_UART_IER_PTIME;
QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);
regs->ier_dlh |= QM_UART_IER_PTIME;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_uart_get_config(const qm_uart_t uart, qm_uart_config_t *cfg)
int qm_uart_get_status(const qm_uart_t uart, qm_uart_status_t *const status)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(status != NULL, -EINVAL);
qm_uart_reg_t *const regs = QM_UART[uart];
uint32_t lsr = regs->lsr;
cfg->baud_divisor = 0;
cfg->hw_fc = 0;
QM_UART[uart].lcr |= QM_UART_LCR_DLAB;
cfg->baud_divisor = QM_UART_CFG_BAUD_DL_PACK(QM_UART[uart].ier_dlh,
QM_UART[uart].rbr_thr_dll,
QM_UART[uart].dlf);
QM_UART[uart].lcr &= ~QM_UART_LCR_DLAB;
cfg->line_control = QM_UART[uart].lcr;
cfg->hw_fc = QM_UART[uart].mcr & QM_UART_MCR_AFCE;
return QM_RC_OK;
}
qm_uart_status_t qm_uart_get_status(const qm_uart_t uart)
{
qm_uart_status_t ret = QM_UART_IDLE;
uint32_t lsr = QM_UART[uart].lsr;
ret |= (lsr & (QM_UART_LSR_OE | QM_UART_LSR_PE | QM_UART_LSR_FE |
QM_UART_LSR_BI));
*status = (lsr & (QM_UART_LSR_OE | QM_UART_LSR_PE | QM_UART_LSR_FE |
QM_UART_LSR_BI));
/*
* Check as an IRQ TX completed, if so, the Shift register may still be
* busy.
*/
if (QM_UART[uart].scr & BIT(0)) {
QM_UART[uart].scr &= ~BIT(0);
if (regs->scr & BIT(0)) {
regs->scr &= ~BIT(0);
} else if (!(lsr & (QM_UART_LSR_TEMT))) {
ret |= QM_UART_TX_BUSY;
*status |= QM_UART_TX_BUSY;
}
if (lsr & QM_UART_LSR_DR) {
ret |= QM_UART_RX_BUSY;
*status |= QM_UART_RX_BUSY;
}
return ret;
return 0;
}
qm_rc_t qm_uart_write(const qm_uart_t uart, const uint8_t data)
int qm_uart_write(const qm_uart_t uart, const uint8_t data)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
while (QM_UART[uart].lsr & QM_UART_LSR_THRE) {
qm_uart_reg_t *const regs = QM_UART[uart];
while (regs->lsr & QM_UART_LSR_THRE) {
}
QM_UART[uart].rbr_thr_dll = data;
regs->rbr_thr_dll = data;
/* Wait for transaction to complete. */
while (!(QM_UART[uart].lsr & QM_UART_LSR_TEMT)) {
while (!(regs->lsr & QM_UART_LSR_TEMT)) {
}
return QM_RC_OK;
return 0;
}
qm_uart_status_t qm_uart_read(const qm_uart_t uart, uint8_t *data)
int qm_uart_read(const qm_uart_t uart, uint8_t *const data,
qm_uart_status_t *status)
{
QM_CHECK(uart < QM_UART_NUM, QM_UART_EINVAL);
QM_CHECK(data != NULL, QM_UART_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
uint32_t lsr = QM_UART[uart].lsr;
qm_uart_reg_t *const regs = QM_UART[uart];
uint32_t lsr = regs->lsr;
while (!(lsr & QM_UART_LSR_DR)) {
lsr = QM_UART[uart].lsr;
lsr = regs->lsr;
}
/* Check are there any errors on the line. */
/* Check if there are any errors on the line. */
if (lsr & QM_UART_LSR_ERROR_BITS) {
return (lsr & QM_UART_LSR_ERROR_BITS);
if (status) {
*status = (lsr & QM_UART_LSR_ERROR_BITS);
}
return -EIO;
}
*data = QM_UART[uart].rbr_thr_dll;
*data = regs->rbr_thr_dll;
return QM_UART_OK;
return 0;
}
qm_rc_t qm_uart_write_non_block(const qm_uart_t uart, const uint8_t data)
int qm_uart_write_non_block(const qm_uart_t uart, const uint8_t data)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_UART[uart].rbr_thr_dll = data;
qm_uart_reg_t *const regs = QM_UART[uart];
return QM_RC_OK;
regs->rbr_thr_dll = data;
return 0;
}
uint8_t qm_uart_read_non_block(const qm_uart_t uart)
int qm_uart_read_non_block(const qm_uart_t uart, uint8_t *const data)
{
return QM_UART[uart].rbr_thr_dll;
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
qm_uart_reg_t *const regs = QM_UART[uart];
*data = regs->rbr_thr_dll;
return 0;
}
qm_rc_t qm_uart_write_buffer(const qm_uart_t uart, const uint8_t *const data,
uint32_t len)
int qm_uart_write_buffer(const qm_uart_t uart, const uint8_t *const data,
uint32_t len)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(data != NULL, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(data != NULL, -EINVAL);
qm_uart_reg_t *const regs = QM_UART[uart];
uint8_t *d = (uint8_t *)data;
@ -272,98 +343,293 @@ qm_rc_t qm_uart_write_buffer(const qm_uart_t uart, const uint8_t *const data,
* Because FCR_FIFOE and IER_PTIME are enabled, LSR_THRE
* behaves as a TX FIFO full indicator.
*/
while (QM_UART[uart].lsr & QM_UART_LSR_THRE) {
while (regs->lsr & QM_UART_LSR_THRE) {
}
QM_UART[uart].rbr_thr_dll = *d;
regs->rbr_thr_dll = *d;
d++;
}
/* Wait for transaction to complete. */
while (!(QM_UART[uart].lsr & QM_UART_LSR_TEMT)) {
while (!(regs->lsr & QM_UART_LSR_TEMT)) {
}
return QM_RC_OK;
return 0;
}
qm_uart_status_t qm_uart_irq_write(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer)
int qm_uart_irq_write(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer)
{
QM_CHECK(uart < QM_UART_NUM, QM_UART_EINVAL);
QM_CHECK(xfer != NULL, QM_UART_EINVAL);
QM_CHECK(xfer->fin_callback, QM_UART_EINVAL);
QM_CHECK(xfer->err_callback, QM_UART_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(xfer != NULL, -EINVAL);
qm_uart_status_t ret = QM_UART_TX_BUSY;
qm_uart_reg_t *const regs = QM_UART[uart];
if (!(QM_UART_TX_BUSY & qm_uart_get_status(uart))) {
ret = QM_UART_OK;
write_pos[uart] = 0;
write_len[uart] = xfer->data_len;
write_buffer[uart] = xfer->data;
write_callback[uart] = xfer->callback;
write_data[uart] = xfer->callback_data;
uart_write_pos[uart] = 0;
uart_write_remaining[uart] = xfer->data_len;
uart_write_buffer[uart] = xfer->data;
uart_write_callback[uart] = xfer->fin_callback;
uart_write_err_callback[uart] = xfer->err_callback;
uart_write_id[uart] = xfer->id;
/* Set threshold */
regs->iir_fcr =
(QM_UART_FCR_FIFOE | QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);
/* Set threshold */
QM_UART[uart].iir_fcr =
(QM_UART_FCR_FIFOE | QM_UART_DEFAULT_TX_RX_THRESHOLD);
/* Enable TX holding reg empty interrupt. */
regs->ier_dlh |= QM_UART_IER_ETBEI;
/* Enable TX holding reg empty interrupt. */
QM_UART[uart].ier_dlh |= QM_UART_IER_ETBEI;
}
return ret;
return 0;
}
qm_uart_status_t qm_uart_irq_read(const qm_uart_t uart,
const qm_uart_transfer_t *const xfer)
int qm_uart_irq_read(const qm_uart_t uart, const qm_uart_transfer_t *const xfer)
{
QM_CHECK(uart < QM_UART_NUM, QM_UART_EINVAL);
QM_CHECK(xfer != NULL, QM_UART_EINVAL);
QM_CHECK(xfer->fin_callback, QM_UART_EINVAL);
QM_CHECK(xfer->err_callback, QM_UART_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
QM_CHECK(xfer != NULL, -EINVAL);
qm_uart_status_t ret = QM_UART_RX_BUSY;
qm_uart_reg_t *const regs = QM_UART[uart];
if (0 == uart_read_remaining[uart]) {
ret = QM_UART_OK;
read_pos[uart] = 0;
read_len[uart] = xfer->data_len;
read_buffer[uart] = xfer->data;
read_callback[uart] = xfer->callback;
read_data[uart] = xfer->callback_data;
uart_read_pos[uart] = 0;
uart_read_remaining[uart] = xfer->data_len;
uart_read_buffer[uart] = xfer->data;
uart_read_callback[uart] = xfer->fin_callback;
uart_read_err_callback[uart] = xfer->err_callback;
uart_read_id[uart] = xfer->id;
/* Set threshold */
regs->iir_fcr =
(QM_UART_FCR_FIFOE | QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);
/* Set threshold */
QM_UART[uart].iir_fcr =
(QM_UART_FCR_FIFOE | QM_UART_DEFAULT_TX_RX_THRESHOLD);
/*
* Enable both 'Receiver Data Available' and 'Receiver
* Line Status' interrupts.
*/
regs->ier_dlh |= QM_UART_IER_ERBFI | QM_UART_IER_ELSI;
/* Enable RX interrupt. */
QM_UART[uart].ier_dlh |= QM_UART_IER_ERBFI;
}
return ret;
return 0;
}
qm_rc_t qm_uart_write_terminate(const qm_uart_t uart)
int qm_uart_irq_write_terminate(const qm_uart_t uart)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
qm_uart_reg_t *const regs = QM_UART[uart];
/* Disable TX holding reg empty interrupt. */
QM_UART[uart].ier_dlh &= ~QM_UART_IER_ETBEI;
uart_write_callback[uart](uart_write_id[uart], uart_write_pos[uart]);
uart_write_remaining[uart] = 0;
regs->ier_dlh &= ~QM_UART_IER_ETBEI;
if (write_callback[uart]) {
write_callback[uart](write_data[uart], -ECANCELED, QM_UART_IDLE,
write_pos[uart]);
}
write_len[uart] = 0;
return QM_RC_OK;
return 0;
}
qm_rc_t qm_uart_read_terminate(const qm_uart_t uart)
int qm_uart_irq_read_terminate(const qm_uart_t uart)
{
QM_CHECK(uart < QM_UART_NUM, QM_RC_EINVAL);
QM_CHECK(uart < QM_UART_NUM, -EINVAL);
/* Disable receive interrupts */
QM_UART[uart].ier_dlh &= ~QM_UART_IER_ERBFI;
uart_read_callback[uart](uart_read_id[uart], uart_read_pos[uart]);
uart_read_remaining[uart] = 0;
qm_uart_reg_t *const regs = QM_UART[uart];
return QM_RC_OK;
/*
* Disable both 'Receiver Data Available' and 'Receiver Line Status'
* interrupts.
*/
regs->ier_dlh &= ~(QM_UART_IER_ERBFI | QM_UART_IER_ELSI);
if (read_callback[uart]) {
read_callback[uart](read_data[uart], -ECANCELED, QM_UART_IDLE,
read_pos[uart]);
}
read_len[uart] = 0;
return 0;
}
/* DMA driver invoked callback. */
static void uart_dma_callback(void *callback_context, uint32_t len,
int error_code)
{
QM_ASSERT(callback_context);
const qm_uart_transfer_t *const xfer =
((dma_context_t *)callback_context)->xfer;
QM_ASSERT(xfer);
const uart_client_callback_t client_callback = xfer->callback;
void *const client_data = xfer->callback_data;
const uint32_t client_expected_len = xfer->data_len;
if (!client_callback) {
return;
}
if (error_code) {
/*
* Transfer failed, pass to client the error code returned by
* the DMA driver.
*/
client_callback(client_data, error_code, QM_UART_IDLE, 0);
} else if (len == client_expected_len) {
/* Transfer completed successfully. */
client_callback(client_data, 0, QM_UART_IDLE, len);
} else {
QM_ASSERT(len < client_expected_len);
/* Transfer cancelled. */
client_callback(client_data, -ECANCELED, QM_UART_IDLE, len);
}
}
/**
 * Configure a DMA channel for use with a UART.
 *
 * Sets up transfer widths, burst lengths, handshake interface and the
 * DMA callback context for the given direction, then writes the channel
 * configuration through the DMA driver.
 *
 * @param[in] uart UART index.
 * @param[in] dma_ctrl_id DMA controller index.
 * @param[in] dma_channel_id DMA channel to configure.
 * @param[in] dma_channel_direction Transfer direction
 *            (memory-to-peripheral for TX, peripheral-to-memory for RX).
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_uart_dma_channel_config(
    const qm_uart_t uart, const qm_dma_t dma_ctrl_id,
    const qm_dma_channel_id_t dma_channel_id,
    const qm_dma_channel_direction_t dma_channel_direction)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);
	QM_CHECK(dma_ctrl_id < QM_DMA_NUM, -EINVAL);
	QM_CHECK(dma_channel_id < QM_DMA_CHANNEL_NUM, -EINVAL);

	qm_dma_channel_config_t chan_cfg = {0};

	/* UART has inverted handshake polarity. */
	chan_cfg.handshake_polarity = QM_DMA_HANDSHAKE_POLARITY_LOW;
	chan_cfg.channel_direction = dma_channel_direction;
	chan_cfg.source_transfer_width = QM_DMA_TRANS_WIDTH_8;
	chan_cfg.destination_transfer_width = QM_DMA_TRANS_WIDTH_8;
	/* Default FIFO threshold is 1/2 full (8 bytes). */
	chan_cfg.source_burst_length = QM_DMA_BURST_TRANS_LENGTH_8;
	chan_cfg.destination_burst_length = QM_DMA_BURST_TRANS_LENGTH_8;
	chan_cfg.client_callback = uart_dma_callback;

	if (dma_channel_direction == QM_DMA_MEMORY_TO_PERIPHERAL) {
		/* TX path: pick the per-UART TX handshake interface. */
		if (uart == QM_UART_0) {
			chan_cfg.handshake_interface = DMA_HW_IF_UART_A_TX;
		} else if (uart == QM_UART_1) {
			chan_cfg.handshake_interface = DMA_HW_IF_UART_B_TX;
		} else {
			return -EINVAL;
		}
		/*
		 * The DMA driver needs a pointer to the DMA context
		 * structure used on DMA callback invocation.
		 */
		dma_context_tx[uart].dma_channel_id = dma_channel_id;
		chan_cfg.callback_context = &dma_context_tx[uart];
	} else if (dma_channel_direction == QM_DMA_PERIPHERAL_TO_MEMORY) {
		/* RX path: pick the per-UART RX handshake interface. */
		if (uart == QM_UART_0) {
			chan_cfg.handshake_interface = DMA_HW_IF_UART_A_RX;
		} else if (uart == QM_UART_1) {
			chan_cfg.handshake_interface = DMA_HW_IF_UART_B_RX;
		} else {
			return -EINVAL;
		}
		/*
		 * The DMA driver needs a pointer to the DMA context
		 * structure used on DMA callback invocation.
		 */
		dma_context_rx[uart].dma_channel_id = dma_channel_id;
		chan_cfg.callback_context = &dma_context_rx[uart];
	} else {
		/* Direction not allowed on UART transfers. */
		return -EINVAL;
	}

	dma_core[uart] = dma_ctrl_id;

	return qm_dma_channel_set_config(dma_ctrl_id, dma_channel_id,
					 &chan_cfg);
}
/**
 * Start a DMA-based UART write.
 *
 * Programs the TX DMA channel with the client buffer as source and the
 * UART TX holding register as destination, then starts the transfer.
 * Completion is reported via the client callback in @a xfer.
 *
 * @param[in] uart UART index.
 * @param[in] xfer Transfer descriptor. This must not be NULL.
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_uart_dma_write(const qm_uart_t uart,
		      const qm_uart_transfer_t *const xfer)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	QM_CHECK(xfer->data, -EINVAL);
	QM_CHECK(xfer->data_len, -EINVAL);

	qm_uart_reg_t *const regs = QM_UART[uart];

	qm_dma_transfer_t dma_cfg = {0};
	dma_cfg.block_size = xfer->data_len;
	dma_cfg.source_address = (uint32_t *)xfer->data;
	dma_cfg.destination_address = (uint32_t *)&regs->rbr_thr_dll;

	int rc = qm_dma_transfer_set_config(
	    dma_core[uart], dma_context_tx[uart].dma_channel_id, &dma_cfg);
	if (rc) {
		return rc;
	}

	/* Store the user transfer pointer that we will need on DMA callback. */
	dma_context_tx[uart].xfer = xfer;

	/* Set the FCR register FIFO thresholds. */
	regs->iir_fcr =
	    (QM_UART_FCR_FIFOE | QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);

	return qm_dma_transfer_start(dma_core[uart],
				     dma_context_tx[uart].dma_channel_id);
}
/**
 * Start a DMA-based UART read.
 *
 * Programs the RX DMA channel with the UART receive buffer register as
 * source and the client buffer as destination, then starts the transfer.
 * Completion is reported via the client callback in @a xfer.
 *
 * @param[in] uart UART index.
 * @param[in] xfer Transfer descriptor. This must not be NULL.
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_uart_dma_read(const qm_uart_t uart, const qm_uart_transfer_t *const xfer)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);
	QM_CHECK(xfer->data, -EINVAL);
	QM_CHECK(xfer->data_len, -EINVAL);

	qm_uart_reg_t *const regs = QM_UART[uart];

	qm_dma_transfer_t dma_cfg = {0};
	dma_cfg.block_size = xfer->data_len;
	dma_cfg.source_address = (uint32_t *)&regs->rbr_thr_dll;
	dma_cfg.destination_address = (uint32_t *)xfer->data;

	int rc = qm_dma_transfer_set_config(
	    dma_core[uart], dma_context_rx[uart].dma_channel_id, &dma_cfg);
	if (rc) {
		return rc;
	}

	/* Store the user transfer pointer that we will need on DMA callback. */
	dma_context_rx[uart].xfer = xfer;

	/* Set the FCR register FIFO thresholds. */
	regs->iir_fcr =
	    (QM_UART_FCR_FIFOE | QM_UART_FCR_DEFAULT_TX_RX_THRESHOLD);

	return qm_dma_transfer_start(dma_core[uart],
				     dma_context_rx[uart].dma_channel_id);
}
/**
 * Terminate an ongoing DMA-based UART write.
 *
 * @param[in] uart UART index.
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_uart_dma_write_terminate(const qm_uart_t uart)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);

	/* Stop the DMA transfer on the TX channel bound to this UART. */
	return qm_dma_transfer_terminate(
	    dma_core[uart], dma_context_tx[uart].dma_channel_id);
}
/**
 * Terminate an ongoing DMA-based UART read.
 *
 * @param[in] uart UART index.
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_uart_dma_read_terminate(const qm_uart_t uart)
{
	QM_CHECK(uart < QM_UART_NUM, -EINVAL);

	/* Stop the DMA transfer on the RX channel bound to this UART. */
	return qm_dma_transfer_terminate(
	    dma_core[uart], dma_context_rx[uart].dma_channel_id);
}

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_version.h"
/**
 * Return the ROM version word.
 *
 * The value is read from the fixed address ROM_VERSION_ADDRESS through a
 * volatile pointer so the memory access is not optimized away.
 *
 * @return The 32-bit ROM version value.
 */
uint32_t qm_ver_rom(void)
{
	volatile uint32_t *const version_word =
	    (uint32_t *)ROM_VERSION_ADDRESS;

	return *version_word;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -31,18 +31,20 @@
#define QM_WDT_RELOAD_VALUE (0x76)
static void (*callback[QM_WDT_NUM])(void);
static void (*callback[QM_WDT_NUM])(void *data);
static void *callback_data[QM_WDT_NUM];
void qm_wdt_isr_0(void)
QM_ISR_DECLARE(qm_wdt_isr_0)
{
if (callback[QM_WDT_0])
callback[QM_WDT_0]();
if (callback[QM_WDT_0]) {
callback[QM_WDT_0](callback_data);
}
QM_ISR_EOI(QM_IRQ_WDT_0_VECTOR);
}
qm_rc_t qm_wdt_start(const qm_wdt_t wdt)
int qm_wdt_start(const qm_wdt_t wdt)
{
QM_CHECK(wdt < QM_WDT_NUM, QM_RC_EINVAL);
QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
QM_WDT[wdt].wdt_cr |= QM_WDT_ENABLE;
@ -59,41 +61,33 @@ qm_rc_t qm_wdt_start(const qm_wdt_t wdt)
QM_SCSS_PERIPHERAL->periph_cfg0 |= BIT(1);
return QM_RC_OK;
return 0;
}
qm_rc_t qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg)
int qm_wdt_set_config(const qm_wdt_t wdt, const qm_wdt_config_t *const cfg)
{
QM_CHECK(wdt < QM_WDT_NUM, QM_RC_EINVAL);
QM_CHECK(cfg != NULL, QM_RC_EINVAL);
QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);
QM_CHECK(cfg != NULL, -EINVAL);
if (cfg->mode == QM_WDT_MODE_INTERRUPT_RESET) {
callback[wdt] = cfg->callback;
callback_data[wdt] = cfg->callback_data;
}
QM_WDT[wdt].wdt_cr &= ~QM_WDT_MODE;
QM_WDT[wdt].wdt_cr |= cfg->mode << QM_WDT_MODE_OFFSET;
QM_WDT[wdt].wdt_torr = cfg->timeout;
/* kick the WDT to load the Timeout Period(TOP) value */
qm_wdt_reload(wdt);
callback[wdt] = cfg->callback;
return QM_RC_OK;
return 0;
}
/**
 * Reload ("kick") the watchdog.
 *
 * Writes the restart magic value to the Counter Restart Register, which
 * restarts the WDT counter.
 *
 * @param[in] wdt WDT index.
 *
 * @return 0 on success, negative errno otherwise.
 */
int qm_wdt_reload(const qm_wdt_t wdt)
{
	QM_CHECK(wdt < QM_WDT_NUM, -EINVAL);

	QM_WDT[wdt].wdt_crr = QM_WDT_RELOAD_VALUE;

	return 0;
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,11 +27,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_rar.h"
#include "rar.h"
#if (HAS_RAR)
qm_rc_t rar_set_mode(rar_state_t mode)
int rar_set_mode(const rar_state_t mode)
{
QM_CHECK(mode <= RAR_RETENTION, -EINVAL);
volatile uint32_t i = 32;
volatile uint32_t reg;
@ -54,6 +55,6 @@ qm_rc_t rar_set_mode(rar_state_t mode)
QM_SCSS_PMU->aon_vr = QM_AON_VR_PASS_CODE | reg;
break;
}
return QM_RC_OK;
return 0;
}
#endif

View file

@ -0,0 +1,301 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_ADC_H__
#define __QM_SS_ADC_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* Analog to Digital Converter (ADC) for the Sensor Subsystem.
*
* @defgroup groupSSADC SS ADC
* @{
*/
/**
* SS ADC sample size type.
*/
typedef uint16_t qm_ss_adc_sample_t;
/**
* SS ADC calibration type.
*/
typedef uint8_t qm_ss_adc_calibration_t;
/**
* SS ADC status.
*/
typedef enum {
QM_SS_ADC_IDLE = 0x0, /**< ADC idle. */
QM_SS_ADC_COMPLETE = 0x1, /**< ADC data available. */
QM_SS_ADC_OVERFLOW = 0x2, /**< ADC overflow error. */
QM_SS_ADC_UNDERFLOW = 0x4, /**< ADC underflow error. */
QM_SS_ADC_SEQERROR = 0x8 /**< ADC sequencer error. */
} qm_ss_adc_status_t;
/**
* SS ADC resolution type.
*/
typedef enum {
QM_SS_ADC_RES_6_BITS = 0x5, /**< 6-bit mode. */
QM_SS_ADC_RES_8_BITS = 0x7, /**< 8-bit mode. */
QM_SS_ADC_RES_10_BITS = 0x9, /**< 10-bit mode. */
QM_SS_ADC_RES_12_BITS = 0xB /**< 12-bit mode. */
} qm_ss_adc_resolution_t;
/**
* SS ADC operating mode type.
*/
typedef enum {
QM_SS_ADC_MODE_DEEP_PWR_DOWN, /**< Deep power down mode. */
QM_SS_ADC_MODE_PWR_DOWN, /**< Power down mode. */
QM_SS_ADC_MODE_STDBY, /**< Standby mode. */
QM_SS_ADC_MODE_NORM_CAL, /**< Normal mode, with calibration. */
QM_SS_ADC_MODE_NORM_NO_CAL /**< Normal mode, no calibration. */
} qm_ss_adc_mode_t;
/**
* SS ADC channels type.
*/
typedef enum {
QM_SS_ADC_CH_0, /**< ADC Channel 0. */
QM_SS_ADC_CH_1, /**< ADC Channel 1. */
QM_SS_ADC_CH_2, /**< ADC Channel 2. */
QM_SS_ADC_CH_3, /**< ADC Channel 3. */
QM_SS_ADC_CH_4, /**< ADC Channel 4. */
QM_SS_ADC_CH_5, /**< ADC Channel 5. */
QM_SS_ADC_CH_6, /**< ADC Channel 6. */
QM_SS_ADC_CH_7, /**< ADC Channel 7. */
QM_SS_ADC_CH_8, /**< ADC Channel 8. */
QM_SS_ADC_CH_9, /**< ADC Channel 9. */
QM_SS_ADC_CH_10, /**< ADC Channel 10. */
QM_SS_ADC_CH_11, /**< ADC Channel 11. */
QM_SS_ADC_CH_12, /**< ADC Channel 12. */
QM_SS_ADC_CH_13, /**< ADC Channel 13. */
QM_SS_ADC_CH_14, /**< ADC Channel 14. */
QM_SS_ADC_CH_15, /**< ADC Channel 15. */
QM_SS_ADC_CH_16, /**< ADC Channel 16. */
QM_SS_ADC_CH_17, /**< ADC Channel 17. */
QM_SS_ADC_CH_18 /**< ADC Channel 18. */
} qm_ss_adc_channel_t;
/**
* SS ADC interrupt callback source.
*/
typedef enum {
QM_SS_ADC_TRANSFER, /**< Transfer complete or error callback. */
QM_SS_ADC_MODE_CHANGED, /**< Mode change complete callback. */
QM_SS_ADC_CAL_COMPLETE, /**< Calibration complete callback. */
} qm_ss_adc_cb_source_t;
/**
* SS ADC configuration type.
*/
typedef struct {
/**
* Sample interval in ADC clock cycles, defines the period to wait
* between the start of each sample and can be in the range
* [(resolution+2) - 255].
*/
uint8_t window;
qm_ss_adc_resolution_t resolution; /**< 12, 10, 8, 6-bit resolution. */
} qm_ss_adc_config_t;
/**
* SS ADC transfer type.
*/
typedef struct {
qm_ss_adc_channel_t *ch; /**< Channel sequence array (1-32 channels). */
uint8_t ch_len; /**< Number of channels in the above array. */
qm_ss_adc_sample_t *samples; /**< Array to store samples. */
uint32_t samples_len; /**< Length of sample array. */
/**
* Transfer callback.
*
* Called when a conversion is performed or an error is detected.
*
* @param[in] data The callback user data.
* @param[in] error 0 on success.
* Negative @ref errno for possible error codes.
* @param[in] status ADC status.
* @param[in] source Interrupt callback source.
*/
void (*callback)(void *data, int error, qm_ss_adc_status_t status,
qm_ss_adc_cb_source_t source);
void *callback_data; /**< Callback user data. */
} qm_ss_adc_xfer_t;
/**
* Switch operating mode of SS ADC.
*
* This call is blocking.
*
* @param[in] adc Which ADC to enable.
* @param[in] mode ADC operating mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode);
/**
* Switch operating mode of SS ADC.
*
* This call is non-blocking and will call the user callback on completion.
*
* @param[in] adc Which ADC to enable.
* @param[in] mode ADC operating mode.
* @param[in] callback Callback called on completion.
* @param[in] callback_data The callback user data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_irq_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode,
void (*callback)(void *data, int error,
qm_ss_adc_status_t status,
qm_ss_adc_cb_source_t source),
void *callback_data);
/**
* Calibrate the SS ADC.
*
* It is necessary to calibrate if it is intended to use Normal Mode With
* Calibration. The calibration must be performed if the ADC is used for the
* first time or has been in deep power down mode. This call is blocking.
*
* @param[in] adc Which ADC to calibrate.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_calibrate(const qm_ss_adc_t adc);
/**
* Calibrate the SS ADC.
*
* It is necessary to calibrate if it is intended to use Normal Mode With
* Calibration. The calibration must be performed if the ADC is used for the
* first time or has been in deep power down mode. This call is non-blocking
* and will call the user callback on completion.
*
* @param[in] adc Which ADC to calibrate.
* @param[in] callback Callback called on completion.
* @param[in] callback_data The callback user data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_irq_calibrate(const qm_ss_adc_t adc,
void (*callback)(void *data, int error,
qm_ss_adc_status_t status,
qm_ss_adc_cb_source_t source),
void *callback_data);
/**
* Set SS ADC calibration data.
*
* @param[in] adc Which ADC to set calibration for.
* @param[in] cal Calibration data.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_set_calibration(const qm_ss_adc_t adc,
const qm_ss_adc_calibration_t cal);
/**
* Get the current calibration data for an SS ADC.
*
* @param[in] adc Which ADC to get calibration for.
* @param[out] cal Calibration data. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_get_calibration(const qm_ss_adc_t adc,
qm_ss_adc_calibration_t *const cal);
/**
* Set SS ADC configuration.
*
* This sets the sample window and resolution.
*
* @param[in] adc Which ADC to configure.
* @param[in] cfg ADC configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_set_config(const qm_ss_adc_t adc,
const qm_ss_adc_config_t *const cfg);
/**
* Synchronously read values from the ADC.
*
* This blocking call can read 1-32 ADC values into the array provided.
*
* @param[in] adc Which ADC to read.
* @param[in,out] xfer Channel and sample info. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer);
/**
* Asynchronously read values from the SS ADC.
*
* This is a non-blocking call and will call the user provided callback after
* the requested number of samples have been converted.
*
* @param[in] adc Which ADC to read.
* @param[in,out] xfer Channel sample and callback info. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *const xfer);
/**
* @}
*/
#endif /* __QM_SS_ADC_H__ */

View file

@ -0,0 +1,179 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_GPIO_H__
#define __QM_SS_GPIO_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* General Purpose IO for Sensor Subsystem.
*
* @defgroup groupSSGPIO SS GPIO
* @{
*/
/**
* GPIO SS pin states.
*/
typedef enum {
	QM_SS_GPIO_LOW, /**< Pin level low. */
	QM_SS_GPIO_HIGH, /**< Pin level high. */
	QM_SS_GPIO_STATE_NUM /**< Number of valid pin states. */
} qm_ss_gpio_state_t;
/**
* GPIO port configuration type.
*
* Each bit in the registers control a GPIO pin.
*/
typedef struct {
uint32_t direction; /**< SS GPIO direction, 0b: input, 1b: output. */
uint32_t int_en; /**< Interrupt enable. */
uint32_t int_type; /**< Interrupt type, 0b: level; 1b: edge. */
uint32_t int_polarity; /**< Interrupt polarity, 0b: low, 1b: high. */
uint32_t int_debounce; /**< Debounce on/off. */
/**
* User callback.
*
* Called for any interrupt on the Sensor Subsystem GPIO.
*
* @param[in] data The callback user data.
* @param[in] int_status Bitfield of triggered pins.
*/
void (*callback)(void *data, uint32_t int_status);
void *callback_data; /**< Callback user data. */
} qm_ss_gpio_port_config_t;
/**
* Set SS GPIO port configuration.
*
* This includes the direction of the pins, if interrupts are enabled or not,
* the level on which an interrupt is generated, the polarity of interrupts
* and if GPIO-debounce is enabled or not. If interrupts are enabled it also
* registers the user defined callback function.
*
* @param[in] gpio SS GPIO port index to configure.
* @param[in] cfg New configuration for SS GPIO port. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_set_config(const qm_ss_gpio_t gpio,
const qm_ss_gpio_port_config_t *const cfg);
/**
* Read the current value of a single pin on a given SS GPIO port.
*
* @param[in] gpio SS GPIO port index.
* @param[in] pin Pin of SS GPIO port to read.
* @param[out] state QM_GPIO_LOW for low or QM_GPIO_HIGH for high. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_read_pin(const qm_ss_gpio_t gpio, const uint8_t pin,
qm_ss_gpio_state_t *const state);
/**
* Set a single pin on a given SS GPIO port.
*
* @param[in] gpio SS GPIO port index.
* @param[in] pin Pin of SS GPIO port to set.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_set_pin(const qm_ss_gpio_t gpio, const uint8_t pin);
/**
* Clear a single pin on a given SS GPIO port.
*
* @param[in] gpio SS GPIO port index.
* @param[in] pin Pin of SS GPIO port to clear.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_clear_pin(const qm_ss_gpio_t gpio, const uint8_t pin);
/**
* Set or clear a single SS GPIO pin using a state variable.
*
* @param[in] gpio GPIO port index.
* @param[in] pin Pin of GPIO port to update.
* @param[in] state QM_GPIO_LOW for low or QM_GPIO_HIGH for high.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_set_pin_state(const qm_ss_gpio_t gpio, const uint8_t pin,
const qm_ss_gpio_state_t state);
/**
* Get SS GPIO port values.
*
* Read entire SS GPIO port. Each bit of the val parameter is set to the current
* value of each pin on the port. Maximum 32 pins per port.
*
* @param[in] gpio SS GPIO port index.
* @param[out] port Value of all pins on GPIO port. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_read_port(const qm_ss_gpio_t gpio, uint32_t *const port);
/**
* Set SS GPIO port values.
*
* Write entire SS GPIO port. Each pin on the SS GPIO port is set to the
* corresponding value set in the val parameter. Maximum 32 pins per port.
*
* @param[in] gpio SS GPIO port index.
* @param[in] val Value of all pins on SS GPIO port.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val);
/**
* @}
*/
#endif /* __QM_SS_GPIO_H__ */

View file

@ -0,0 +1,264 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_I2C_H__
#define __QM_SS_I2C_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* I2C driver for Sensor Subsystem.
*
* @defgroup groupSSI2C SS I2C
* @{
*/
/**
* Standard speed High/low period for 50% duty cycle bus clock (in nanosecs).
*/
#define QM_I2C_SS_50_DC_NS (5000)
/**
* Fast Speed High/low period for 50% duty cycle bus clock (in nanosecs).
*/
#define QM_I2C_FS_50_DC_NS (1250)
/**
* High Speed High/low period for 50% duty cycle bus clock (in nanosecs).
*/
#define QM_I2C_FSP_50_DC_NS (500)
/**
* Standard speed minimum low period to meet timing requirements (in nanosecs).
*/
#define QM_I2C_MIN_SS_NS (4700)
/**
* Fast speed minimum low period to meet timing requirements (in nanosecs).
*/
#define QM_I2C_MIN_FS_NS (1300)
/**
* High speed minimum low period to meet timing requirements (in nanosecs).
*/
#define QM_I2C_MIN_FSP_NS (500)
/**
* QM SS I2C addressing type.
*/
typedef enum {
QM_SS_I2C_7_BIT = 0, /**< 7-bit mode. */
QM_SS_I2C_10_BIT /**< 10-bit mode. */
} qm_ss_i2c_addr_t;
/**
* QM SS I2C Speed Type.
*/
typedef enum {
QM_SS_I2C_SPEED_STD = 1, /**< Standard mode (100 Kbps). */
QM_SS_I2C_SPEED_FAST = 2 /**< Fast mode (400 Kbps). */
} qm_ss_i2c_speed_t;
/**
* QM SS I2C status type.
*/
typedef enum {
QM_I2C_IDLE = 0, /**< Controller idle. */
QM_I2C_TX_ABRT_7B_ADDR_NOACK = BIT(0), /**< 7-bit address noack. */
QM_I2C_TX_ABRT_TXDATA_NOACK = BIT(3), /**< Tx data noack. */
QM_I2C_TX_ABRT_SBYTE_ACKDET = BIT(7), /**< Start ACK. */
QM_I2C_TX_ABRT_MASTER_DIS = BIT(11), /**< Master disabled. */
QM_I2C_TX_ARB_LOST = BIT(12), /**< Master lost arbitration. */
QM_I2C_TX_ABRT_SLVFLUSH_TXFIFO = BIT(13), /**< Slave flush tx FIFO. */
QM_I2C_TX_ABRT_SLV_ARBLOST = BIT(14), /**< Slave lost bus. */
QM_I2C_TX_ABRT_SLVRD_INTX = BIT(15), /**< Slave read completion. */
QM_I2C_TX_ABRT_USER_ABRT = BIT(16), /**< User abort. */
QM_I2C_BUSY = BIT(17) /**< Controller busy. */
} qm_ss_i2c_status_t;
/**
* QM SS I2C configuration type.
*/
typedef struct {
qm_ss_i2c_speed_t speed; /**< Standard, Fast Mode. */
qm_ss_i2c_addr_t address_mode; /**< 7 or 10 bit addressing. */
} qm_ss_i2c_config_t;
/**
* QM SS I2C transfer type.
* - if tx len is 0: perform receive-only transaction.
* - if rx len is 0: perform transmit-only transaction.
* - both tx and rx len not 0: perform a transmit-then-receive
* combined transaction.
*/
typedef struct {
uint8_t *tx; /**< Write data. */
uint32_t tx_len; /**< Write data length. */
uint8_t *rx; /**< Read data. */
uint32_t rx_len; /**< Read buffer length. */
bool stop; /**< Generate master STOP. */
/**
* User callback.
*
* @param[in] data User defined data.
* @param[in] rc 0 on success.
* Negative @ref errno for possible error codes.
* @param[in] status I2C status.
* @param[in] len Length of the transfer if successful, 0 otherwise.
*/
void (*callback)(void *data, int rc, qm_ss_i2c_status_t status,
uint32_t len);
void *callback_data; /**< User callback data. */
} qm_ss_i2c_transfer_t;
/**
* Set SS I2C configuration.
*
* @param[in] i2c Which I2C to set the configuration of.
* @param[in] cfg I2C configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
const qm_ss_i2c_config_t *const cfg);
/**
* Set I2C speed.
*
* Fine tune SS I2C clock speed.
* This will set the SCL low count and the SCL hi count cycles
* to achieve any required speed.
*
* @param[in] i2c I2C index.
* @param[in] speed Bus speed (Standard or Fast.Fast includes Fast + mode).
* @param[in] lo_cnt SCL low count.
* @param[in] hi_cnt SCL high count.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_set_speed(const qm_ss_i2c_t i2c, const qm_ss_i2c_speed_t speed,
const uint16_t lo_cnt, const uint16_t hi_cnt);
/**
* Retrieve SS I2C status.
*
* @param[in] i2c Which I2C to read the status of.
* @param[out] status Get i2c status. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_get_status(const qm_ss_i2c_t i2c,
qm_ss_i2c_status_t *const status);
/**
* Master write on I2C.
*
* Perform a master write on the SS I2C bus.
* This is a blocking synchronous call.
*
* @param[in] i2c Which I2C to write to.
* @param[in] slave_addr Address of slave to write to.
* @param[in] data Pre-allocated buffer of data to write.
* This must not be NULL.
* @param[in] len length of data to write.
* @param[in] stop Generate a STOP condition at the end of tx.
* @param[out] status Get i2c status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
const uint8_t *const data, uint32_t len,
const bool stop, qm_ss_i2c_status_t *const status);
/**
* Master read of I2C.
*
 * Perform a master read of len bytes from the SS I2C. This is a blocking call.
*
* @param[in] i2c Which I2C to read from.
* @param[in] slave_addr Address of slave device to read from.
* @param[out] data Pre-allocated buffer to populate with data.
* This must not be NULL.
* @param[in] len length of data to read from slave.
* @param[in] stop Generate a STOP condition at the end of tx.
* @param[out] status Get i2c status.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_master_read(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
uint8_t *const data, uint32_t len, const bool stop,
qm_ss_i2c_status_t *const status);
/**
* Interrupt based master transfer on I2C.
*
* Perform an interrupt based master transfer on the SS I2C bus. The function
* will replenish/empty TX/RX FIFOs on I2C empty/full interrupts.
*
* @param[in] i2c Which I2C to transfer from.
* @param[in] xfer Transfer structure includes write / read data and length,
* user callback function and the callback context.
* This must not be NULL.
* @param[in] slave_addr Address of slave to transfer data with.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
const qm_ss_i2c_transfer_t *const xfer,
const uint16_t slave_addr);
/**
* Terminate I2C IRQ/DMA transfer.
*
* Terminate the current IRQ transfer on the SS I2C bus.
* This will cause the user callback to be called with status
* QM_I2C_TX_ABRT_USER_ABRT.
*
* @param[in] i2c I2C register block pointer.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c);
/**
* @}
*/
#endif /* __QM_SS_I2C_H__ */

View file

@ -0,0 +1,92 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_INTERRUPT_H__
#define __QM_SS_INTERRUPT_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* Interrupt driver for Sensor Subsystem.
*
* @defgroup groupSSINT SS Interrupt
* @{
*/
/**
 * Interrupt service routine type.
 *
 * @param[in] frame Interrupt stack frame passed to the ISR by the
 *                  interrupt entry code.
 */
typedef void (*qm_ss_isr_t)(struct interrupt_frame *frame);
/**
* Enable interrupt delivery for the Sensor Subsystem.
*/
void qm_ss_irq_enable(void);
/**
* Disable interrupt delivery for the Sensor Subsystem.
*/
void qm_ss_irq_disable(void);
/**
* Unmask a given interrupt line.
*
* @param [in] irq Which IRQ to unmask.
*/
void qm_ss_irq_unmask(uint32_t irq);
/**
* Mask a given interrupt line.
*
* @param [in] irq Which IRQ to mask.
*/
void qm_ss_irq_mask(uint32_t irq);
/**
* Request a given IRQ and register ISR to interrupt vector.
*
* @param [in] irq IRQ number.
* @param [in] isr ISR to register to given IRQ.
*/
void qm_ss_irq_request(uint32_t irq, qm_ss_isr_t isr);
/**
* Register an Interrupt Service Routine to a given interrupt vector.
*
* @param [in] vector Interrupt Vector number.
* @param [in] isr ISR to register to given vector. Must be a valid Sensor
* Subsystem ISR.
*/
void qm_ss_int_vector_request(uint32_t vector, qm_ss_isr_t isr);
/**
* @}
*/
#endif /* __QM_SS_INTERRUPT_H__ */

View file

@ -0,0 +1,169 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_ISR_H__
#define __QM_SS_ISR_H__
#include "qm_common.h"
/**
* Sensor Subsystem Interrupt Service Routines.
*
* @defgroup groupSSISR SS ISR
* @{
*/
/**
* ISR for ADC interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_ADC_IRQ, qm_ss_adc_0_isr);
* @endcode if IRQ based conversions are used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_isr);
/**
* ISR for ADC error interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_ADC_ERR, qm_ss_adc_0_err_isr);
* @endcode if IRQ based conversions are used.
*/
QM_ISR_DECLARE(qm_ss_adc_0_err_isr);
/**
 * ISR for GPIO 0 interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_GPIO_INTR_0, qm_ss_gpio_isr_0);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_gpio_isr_0);
/**
 * ISR for GPIO 1 interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_GPIO_INTR_1, qm_ss_gpio_isr_1);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_gpio_isr_1);
/**
 * ISR for I2C 0 interrupts (error, RX available, TX request, STOP detected).
 *
 * This function needs to be registered with
 * @code
 * qm_ss_irq_request(QM_SS_IRQ_I2C_0_ERR, qm_ss_i2c_isr_0);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_0_RX_AVAIL, qm_ss_i2c_isr_0);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_0_TX_REQ, qm_ss_i2c_isr_0);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_0_STOP_DET, qm_ss_i2c_isr_0);
 * @endcode
 * if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_isr_0);
/**
 * ISR for I2C 1 interrupts (error, RX available, TX request, STOP detected).
 *
 * This function needs to be registered with
 * @code
 * qm_ss_irq_request(QM_SS_IRQ_I2C_1_ERR, qm_ss_i2c_isr_1);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_1_RX_AVAIL, qm_ss_i2c_isr_1);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_1_TX_REQ, qm_ss_i2c_isr_1);
 * qm_ss_irq_request(QM_SS_IRQ_I2C_1_STOP_DET, qm_ss_i2c_isr_1);
 * @endcode
 * if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_i2c_isr_1);
/**
* ISR for SPI 0 error interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_ERR_INT, qm_ss_spi_0_err_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_0_err_isr);
/**
* ISR for SPI 1 error interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_ERR_INT, qm_ss_spi_1_err_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_1_err_isr);
/**
* ISR for SPI 0 TX data requested interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_TX_REQ, qm_ss_spi_0_tx_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_0_tx_isr);
/**
* ISR for SPI 1 TX data requested interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_TX_REQ, qm_ss_spi_1_tx_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_1_tx_isr);
/**
* ISR for SPI 0 RX data available interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_0_RX_AVAIL, qm_ss_spi_0_rx_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_0_rx_isr);
/**
 * ISR for SPI 1 RX data available interrupt.
*
* This function needs to be registered with
* @code qm_ss_irq_request(QM_SS_IRQ_SPI_1_RX_AVAIL, qm_ss_spi_1_rx_isr);
* @endcode if IRQ based transfers are used.
*/
QM_ISR_DECLARE(qm_ss_spi_1_rx_isr);
/**
* ISR for SS Timer 0 interrupt.
*
* This function needs to be registered with
* @code qm_ss_int_vector_request(QM_SS_INT_TIMER_0, qm_ss_timer_isr_0);
* @endcode
*/
QM_ISR_DECLARE(qm_ss_timer_isr_0);
/**
* @}
*/
#endif /* __QM_SS_ISR_H__ */

View file

@ -0,0 +1,299 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_SPI_H__
#define __QM_SS_SPI_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* SPI peripheral driver for Sensor Subsystem.
*
* @defgroup groupSSSPI SS SPI
* @{
*/
/**
 * QM SPI frame size type.
 *
 * The enumerator value is the hardware register encoding, i.e. the frame
 * size in bits minus one (a 4-bit frame is encoded as 3).
 */
typedef enum {
	QM_SS_SPI_FRAME_SIZE_4_BIT = 3, /**< 4 bit frame. */
	QM_SS_SPI_FRAME_SIZE_5_BIT,     /**< 5 bit frame. */
	QM_SS_SPI_FRAME_SIZE_6_BIT,     /**< 6 bit frame. */
	QM_SS_SPI_FRAME_SIZE_7_BIT,     /**< 7 bit frame. */
	QM_SS_SPI_FRAME_SIZE_8_BIT,     /**< 8 bit frame. */
	QM_SS_SPI_FRAME_SIZE_9_BIT,     /**< 9 bit frame. */
	QM_SS_SPI_FRAME_SIZE_10_BIT,    /**< 10 bit frame. */
	QM_SS_SPI_FRAME_SIZE_11_BIT,    /**< 11 bit frame. */
	QM_SS_SPI_FRAME_SIZE_12_BIT,    /**< 12 bit frame. */
	QM_SS_SPI_FRAME_SIZE_13_BIT,    /**< 13 bit frame. */
	QM_SS_SPI_FRAME_SIZE_14_BIT,    /**< 14 bit frame. */
	QM_SS_SPI_FRAME_SIZE_15_BIT,    /**< 15 bit frame. */
	QM_SS_SPI_FRAME_SIZE_16_BIT     /**< 16 bit frame. */
} qm_ss_spi_frame_size_t;
/**
 * SPI transfer mode type.
 */
typedef enum {
	/**
	 * Transmit & Receive mode.
	 *
	 * This mode synchronously receives and transmits data during the
	 * transfer; tx_len and rx_len must be equal.
	 */
	QM_SS_SPI_TMOD_TX_RX,
	/**
	 * Transmit-Only mode.
	 *
	 * This mode only transmits data. The rx buffer is not accessed and
	 * rx_len must be set to 0.
	 */
	QM_SS_SPI_TMOD_TX,
	/**
	 * Receive-Only mode.
	 *
	 * This mode only receives data. The tx buffer is not accessed and
	 * tx_len must be set to 0.
	 */
	QM_SS_SPI_TMOD_RX,
	/**
	 * EEPROM-Read Mode.
	 *
	 * This mode transmits the data stored in the tx buffer (EEPROM
	 * address). After the transmit is completed it populates the rx buffer
	 * (EEPROM data) with received data.
	 */
	QM_SS_SPI_TMOD_EEPROM_READ
} qm_ss_spi_tmode_t;
/**
 * SPI bus mode type.
 *
 * Encodes the four standard SPI clock polarity (CPOL) / clock phase
 * (CPHA) combinations, modes 0-3.
 */
typedef enum {
	QM_SS_SPI_BMODE_0, /**< Clock Polarity = 0, Clock Phase = 0. */
	QM_SS_SPI_BMODE_1, /**< Clock Polarity = 0, Clock Phase = 1. */
	QM_SS_SPI_BMODE_2, /**< Clock Polarity = 1, Clock Phase = 0. */
	QM_SS_SPI_BMODE_3  /**< Clock Polarity = 1, Clock Phase = 1. */
} qm_ss_spi_bmode_t;
/**
 * SPI slave select type.
 *
 * Each enumerator is a distinct bit, so slave selects can be combined
 * by logical OR (e.g. QM_SS_SPI_SS_0 | QM_SS_SPI_SS_1).
 */
typedef enum {
	QM_SS_SPI_SS_NONE = 0,   /**< No slave select. */
	QM_SS_SPI_SS_0 = BIT(0), /**< Slave select 0. */
	QM_SS_SPI_SS_1 = BIT(1), /**< Slave select 1. */
	QM_SS_SPI_SS_2 = BIT(2), /**< Slave select 2. */
	QM_SS_SPI_SS_3 = BIT(3), /**< Slave select 3. */
} qm_ss_spi_slave_select_t;
/**
 * SPI status.
 *
 * Reported by qm_ss_spi_get_status and passed to transfer callbacks.
 */
typedef enum {
	QM_SS_SPI_IDLE,       /**< SPI device is not in use. */
	QM_SS_SPI_BUSY,       /**< SPI device is busy. */
	QM_SS_SPI_RX_OVERFLOW /**< RX transfer has overflown. */
} qm_ss_spi_status_t;
/**
 * SPI configuration type.
 *
 * Applied with qm_ss_spi_set_config; changing the configuration is only
 * permitted while the SPI module is disabled.
 */
typedef struct {
	qm_ss_spi_frame_size_t frame_size; /**< Frame Size. */
	qm_ss_spi_tmode_t transfer_mode;   /**< Transfer mode (enum). */
	qm_ss_spi_bmode_t bus_mode;        /**< Bus mode (enum). */
	/**
	 * Clock divider.
	 *
	 * The clock divider sets the SPI speed on the interface.
	 * A value of 0 will disable SCK. The LSB of this value is ignored.
	 */
	uint16_t clk_divider;
} qm_ss_spi_config_t;
/**
 * SPI IRQ transfer type.
 *
 * Describes an interrupt based transfer (see qm_ss_spi_irq_transfer).
 * The structure must remain valid until the transfer completes.
 */
typedef struct {
	uint8_t *tx;     /**< Write data buffer pointer. */
	uint16_t tx_len; /**< Write data length. */
	uint8_t *rx;     /**< Read data buffer pointer. */
	uint16_t rx_len; /**< Read buffer length. */
	/**
	 * Transfer callback.
	 *
	 * Called after all data is transmitted/received or if the driver
	 * detects an error during the SPI transfer.
	 *
	 * @param[in] data The callback user data.
	 * @param[in] error 0 on success.
	 *                  Negative @ref errno for possible error codes.
	 * @param[in] status The SPI module status.
	 * @param[in] len The amount of frames transmitted.
	 */
	void (*callback)(void *data, int error, qm_ss_spi_status_t status,
			 uint16_t len);
	void *data; /**< Callback user data. */
} qm_ss_spi_async_transfer_t;
/**
 * SPI transfer type.
 *
 * Describes a blocking, synchronous transfer (see qm_ss_spi_transfer).
 */
typedef struct {
	uint8_t *tx;     /**< Write data buffer pointer. */
	uint16_t tx_len; /**< Write data length. */
	uint8_t *rx;     /**< Read data buffer pointer. */
	uint16_t rx_len; /**< Read buffer length. */
} qm_ss_spi_transfer_t;
/**
* Set SPI configuration.
*
* Change the configuration of a SPI module.
* This includes transfer mode, bus mode and clock divider.
*
* This operation is permitted only when the SPI module is disabled.
*
* @param[in] spi SPI module identifier.
* @param[in] cfg New configuration for SPI. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_set_config(const qm_ss_spi_t spi,
const qm_ss_spi_config_t *const cfg);
/**
* Set Slave Select lines.
*
* Select which slaves to perform SPI transmissions on. Select lines can be
* combined using the | operator. It is only suggested to use this functionality
* in TX only mode. This operation is permitted only when a SPI transfer is not
* already in progress; the caller should check that by retrieving the device
* status.
*
* @param[in] spi SPI module identifier.
* @param[in] ss Select lines to enable when performing transfers.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_slave_select(const qm_ss_spi_t spi,
const qm_ss_spi_slave_select_t ss);
/**
* Get SPI bus status.
*
* @param[in] spi SPI module identifier.
* @param[out] status Reference to the variable where to store the current SPI
* bus status (QM_SS_SPI_BUSY if a transfer is in progress
* or QM_SS_SPI_IDLE if SPI device is IDLE).
* This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_get_status(const qm_ss_spi_t spi,
qm_ss_spi_status_t *const status);
/**
* Perform a blocking SPI transfer.
*
* This is a blocking synchronous call. If transfer mode is full duplex
* (QM_SS_SPI_TMOD_TX_RX) tx_len and rx_len must be equal. Similarly, for
* transmit-only transfers (QM_SS_SPI_TMOD_TX) rx_len must be 0, while for
* receive-only transfers (QM_SS_SPI_TMOD_RX) tx_len must be 0.
*
* @param[in] spi SPI module identifier.
* @param[in] xfer Structure containing transfer information.
* This must not be NULL.
* @param[out] status Reference to the variable where to store the SPI status
* at the end of the transfer.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_transfer(const qm_ss_spi_t spi,
const qm_ss_spi_transfer_t *const xfer,
qm_ss_spi_status_t *const status);
/**
* Initiate a interrupt based SPI transfer.
*
* Perform an interrupt based SPI transfer. If transfer mode is full duplex
* (QM_SS_SPI_TMOD_TX_RX), then tx_len and rx_len must be equal. Similarly, for
* transmit-only transfers (QM_SS_SPI_TMOD_TX) rx_len must be 0, while for
* receive-only transfers (QM_SS_SPI_TMOD_RX) tx_len must be 0. This function is
* non blocking.
*
* @param[in] spi SPI module identifier.
* @param[in] xfer Structure containing transfer information.
* The structure must not be NULL and must be kept valid until
* the transfer is complete.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
const qm_ss_spi_async_transfer_t *const xfer);
/**
* Terminate SPI IRQ transfer.
*
* Terminate the current IRQ SPI transfer.
* This function will trigger complete callbacks even
* if the transfer is not completed.
*
* @param[in] spi SPI module identifier.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi);
/**
* @}
*/
#endif /* __QM_SS_SPI_H__ */

View file

@ -0,0 +1,119 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_TIMER_H__
#define __QM_SS_TIMER_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* Timer driver for Sensor Subsystem.
*
* @defgroup groupSSTimer SS Timer
* @{
*/
/**
 * Sensor Subsystem Timer Configuration Type.
 *
 * Applied with qm_ss_timer_set_config.
 */
typedef struct {
	bool watchdog_mode; /**< Watchdog mode. */
	/**
	 * Increments in run state only.
	 *
	 * If this field is set to 0, the timer will count
	 * in both halt state and running state.
	 * When set to 1, this will only increment in
	 * running state.
	 */
	bool inc_run_only;
	bool int_en;    /**< Interrupt enable. */
	uint32_t count; /**< Final count value. */
	/**
	 * User callback.
	 *
	 * Called for any interrupt on the Sensor Subsystem Timer.
	 *
	 * @param[in] data The callback user data (callback_data below).
	 */
	void (*callback)(void *data);
	void *callback_data; /**< Callback user data. */
} qm_ss_timer_config_t;
/**
* Set the SS timer configuration.
*
* This includes final count value, timer mode and if interrupts are enabled.
* If interrupts are enabled, it will configure the callback function.
*
* @param[in] timer Which SS timer to configure.
* @param[in] cfg SS timer configuration. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_timer_set_config(const qm_ss_timer_t timer,
const qm_ss_timer_config_t *const cfg);
/**
* Set SS timer count value.
*
* Set the current count value of the SS timer.
*
* @param[in] timer Which SS timer to set the count of.
* @param[in] count Value to load the timer with.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_timer_set(const qm_ss_timer_t timer, const uint32_t count);
/**
* Get SS timer count value.
*
* Get the current count value of the SS timer.
*
* @param[in] timer Which SS timer to get the count of.
* @param[out] count Current value of timer. This must not be NULL.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count);
/**
* @}
*/
#endif /* __QM_SS_TIMER_H__ */

View file

@ -0,0 +1,182 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SS_CLK_H__
#define __SS_CLK_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
#include "clk.h"
/**
* Clock Management for Sensor Subsystem.
*
* The clock distribution has three level of gating:
* 1. SE SoC gating through register CCU_PERIPH_CLK_GATE_CTL
 * 2. SS SoC gating through register IO_CREG_MST0_CTRL
* 3. SS peripheral clk gating
* Note: the first two are ungated by hardware power-on default (clock gating is
* done at peripheral level). Thus the only one level of control is enough (and
* implemented in ss_clk driver) to gate clock on or off to the particular
* peripheral.
*
* @defgroup groupSSClock SS Clock
* @{
*/
/**
 * Peripheral clocks selection type.
 *
 * Each enumerator is the gate bit for one SS peripheral clock.
 * GPIO ports live in a separate clock domain and use low bit positions;
 * the remaining peripherals map onto the standard register's high bits.
 */
typedef enum {
	SS_CLK_PERIPH_ADC = BIT(31),   /**< ADC clock selector. */
	SS_CLK_PERIPH_I2C_1 = BIT(30), /**< I2C 1 clock selector. */
	SS_CLK_PERIPH_I2C_0 = BIT(29), /**< I2C 0 clock selector. */
	SS_CLK_PERIPH_SPI_1 = BIT(28), /**< SPI 1 clock selector. */
	SS_CLK_PERIPH_SPI_0 = BIT(27), /**< SPI 0 clock selector. */
	/**
	 * GPIO 1 clock selector.
	 *
	 * Special domain peripherals - these do not map onto the standard
	 * register.
	 */
	SS_CLK_PERIPH_GPIO_1 = BIT(1),
	/**
	 * GPIO 0 clock selector.
	 *
	 * Special domain peripherals - these do not map onto the standard
	 * register.
	 */
	SS_CLK_PERIPH_GPIO_0 = BIT(0)
} ss_clk_periph_t;
/**
* Enable clocking for SS GPIO peripheral.
*
* @param [in] gpio GPIO port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_gpio_enable(const qm_ss_gpio_t gpio);
/**
* Disable clocking for SS GPIO peripheral.
*
* @param [in] gpio GPIO port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_gpio_disable(const qm_ss_gpio_t gpio);
/**
* Enable clocking for SS SPI peripheral.
*
* @param [in] spi SPI port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_spi_enable(const qm_ss_spi_t spi);
/**
* Disable clocking for SS SPI peripheral.
*
* @param [in] spi SPI port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_spi_disable(const qm_ss_spi_t spi);
/**
* Enable clocking for SS I2C peripheral.
*
* @param [in] i2c I2C port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_i2c_enable(const qm_ss_i2c_t i2c);
/**
* Disable clocking for SS I2C peripheral.
*
* @param [in] i2c I2C port index.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_i2c_disable(const qm_ss_i2c_t i2c);
/**
* Enable the SS ADC clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
*/
int ss_clk_adc_enable(void);
/**
* Disable the SS ADC clock.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
*/
int ss_clk_adc_disable(void);
/**
* Set clock divisor for SS ADC.
*
* Note: If the system clock speed is changed, the divisor must be recalculated.
* The minimum supported speed for the SS ADC is 0.14 MHz. So for a system clock
* speed of 1 MHz, the max value of div is 7, and for 32 MHz, the max value is
* 224. System clock speeds of less than 1 MHz are not supported by this
* function.
*
* @param [in] div ADC clock divider value.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int ss_clk_adc_set_div(const uint32_t div);
/**
* @}
*/
#endif /* __SS_CLK_H__ */

View file

@ -0,0 +1,94 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_SS_POWER_STATES_H__
#define __QM_SS_POWER_STATES_H__
#include "qm_common.h"
#include "qm_sensor_regs.h"
/**
* SS Power mode control for Quark SE Microcontrollers.
*
* @defgroup groupSSPower SS Power states
* @{
*/
/**
 * Sensor Subsystem SS1 Timers mode type.
 *
 * Selects whether the SS Timers keep running while in the SS1 state
 * (see ss_power_cpu_ss1).
 */
typedef enum {
	SS_POWER_CPU_SS1_TIMER_OFF = 0, /**< Disable SS Timers in SS1. */
	SS_POWER_CPU_SS1_TIMER_ON       /**< Keep SS Timers enabled in SS1. */
} ss_power_cpu_ss1_mode_t;
/**
* Enter Sensor SS1 state.
*
* Put the Sensor Subsystem into SS1.<BR>
* Processor Clock is gated in this state.
*
* A wake event causes the Sensor Subsystem to transition to SS0.<BR>
* A wake event is a sensor subsystem interrupt.
*
* According to the mode selected, Sensor Subsystem Timers can be disabled.
*
* @param[in] mode Mode selection for SS1 state.
*/
void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode);
/**
* Enter Sensor SS2 state or SoC LPSS state.
*
* Put the Sensor Subsystem into SS2.<BR>
* Sensor Complex Clock is gated in this state.<BR>
* Sensor Peripherals are gated in this state.<BR>
*
* This enables entry in LPSS if:
* - Sensor Subsystem is in SS2
* - Lakemont is in C2 or C2LP
* - LPSS entry is enabled
*
* A wake event causes the Sensor Subsystem to transition to SS0.<BR>
* There are two kinds of wake event depending on the Sensor Subsystem
* and SoC state:
* - SS2: a wake event is a Sensor Subsystem interrupt
* - LPSS: a wake event is a Sensor Subsystem interrupt or a Lakemont interrupt
*
* LPSS wake events apply if LPSS is entered.
* If Host wakes the SoC from LPSS,
* Sensor also transitions back to SS0.
*/
void ss_power_cpu_ss2(void);
/**
* @}
*/
#endif /* __QM_SS_POWER_STATES_H__ */

View file

@ -0,0 +1,652 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_ss_adc.h"
#include <string.h>
#include "clk.h"
/* FIFO_INTERRUPT_THRESHOLD is used by qm_ss_adc_irq_convert to set the
 * threshold at which the FIFO will trigger an interrupt. It is also used by
 * the ISR handler to determine the number of samples to read from the FIFO. */
#define FIFO_INTERRUPT_THRESHOLD (16)
#define QM_SS_ADC_CHAN_SEQ_MAX (32)
#define ADC_SAMPLE_SHIFT (11)
/* SS ADC commands. */
#define QM_SS_ADC_CMD_START_CAL (3)
#define QM_SS_ADC_CMD_LOAD_CAL (4)
/* Mode change delay is clock speed * 5. */
#define CALCULATE_DELAY() (clk_sys_get_ticks_per_us() * 5)
static uint32_t adc_base[QM_SS_ADC_NUM] = {QM_SS_ADC_BASE};
static qm_ss_adc_xfer_t irq_xfer[QM_SS_ADC_NUM];
static uint8_t sample_window[QM_SS_ADC_NUM];
static qm_ss_adc_resolution_t resolution[QM_SS_ADC_NUM];
static uint32_t count[QM_SS_ADC_NUM];
static void (*mode_callback[QM_SS_ADC_NUM])(void *data, int error,
qm_ss_adc_status_t status,
qm_ss_adc_cb_source_t source);
static void (*cal_callback[QM_SS_ADC_NUM])(void *data, int error,
qm_ss_adc_status_t status,
qm_ss_adc_cb_source_t source);
static void *mode_callback_data[QM_SS_ADC_NUM];
static void *cal_callback_data[QM_SS_ADC_NUM];
static void dummy_conversion(uint32_t controller);
static bool first_mode_callback_ignored[QM_SS_ADC_NUM] = {false};
static qm_ss_adc_mode_t requested_mode[QM_SS_ADC_NUM];
/* Set the ADC enable bit in the ADC control register. */
static void enable_adc(void)
{
	QM_SS_REG_AUX_OR(QM_SS_ADC_BASE + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_ADC_ENA);
}
/* Clear the ADC enable bit in the ADC control register. */
static void disable_adc(void)
{
	QM_SS_REG_AUX_NAND(QM_SS_ADC_BASE + QM_SS_ADC_CTRL,
			   QM_SS_ADC_CTRL_ADC_ENA);
}
/*
 * Data-available ISR body shared by all SS ADC instances.
 *
 * Drains up to FIFO_INTERRUPT_THRESHOLD samples from the FIFO into the
 * buffer recorded in irq_xfer[adc]; once samples_len samples have been
 * captured it stops the sequencer, masks the interrupts, invokes the user
 * callback and disables the ADC.
 */
static void qm_ss_adc_isr_handler(const qm_ss_adc_t adc)
{
	uint32_t i, samples_to_read;
	uint32_t controller = adc_base[adc];

	/* Calculate the number of samples to read: a full threshold's worth,
	 * or only what is still missing from the transfer. */
	samples_to_read = FIFO_INTERRUPT_THRESHOLD;
	if (samples_to_read > (irq_xfer[adc].samples_len - count[adc])) {
		samples_to_read = irq_xfer[adc].samples_len - count[adc];
	}

	/* Read the samples into the array. */
	for (i = 0; i < samples_to_read; i++) {
		/* Pop one sample into the sample register. */
		QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET,
				 QM_SS_ADC_SET_POP_RX);
		/* Read the sample in the array; the raw value is shifted
		 * down according to the configured resolution. */
		irq_xfer[adc].samples[count[adc]] =
		    (__builtin_arc_lr(controller + QM_SS_ADC_SAMPLE) >>
		     (ADC_SAMPLE_SHIFT - resolution[adc]));
		count[adc]++;
	}

	/* Clear the data available status register. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_CLR_DATA_A);

	if (count[adc] == irq_xfer[adc].samples_len) {
		/* Stop the sequencer. */
		QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_CTRL,
				   QM_SS_ADC_CTRL_SEQ_START);
		/* Mask all interrupts. */
		QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
				 QM_SS_ADC_CTRL_MSK_ALL_INT);

		/* Call the user callback. */
		if (irq_xfer[adc].callback) {
			irq_xfer[adc].callback(irq_xfer[adc].callback_data, 0,
					       QM_SS_ADC_COMPLETE,
					       QM_SS_ADC_TRANSFER);
		}

		/* Disable the ADC. */
		disable_adc();
		return;
	}
}
/*
 * Error ISR body shared by all SS ADC instances.
 *
 * Stops the sequencer, masks interrupts, reports every error condition
 * latched in INTSTAT (overflow/underflow/sequencer error) to the user
 * callback with -EIO, clears the error bits and disables the ADC.
 */
static void qm_ss_adc_isr_err_handler(const qm_ss_adc_t adc)
{
	uint32_t controller = adc_base[adc];
	uint32_t intstat = __builtin_arc_lr(controller + QM_SS_ADC_INTSTAT);

	/* Stop the sequencer. */
	QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_CTRL,
			   QM_SS_ADC_CTRL_SEQ_START);
	/* Mask all interrupts. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_MSK_ALL_INT);

	/* Call the user callback and pass it the status code. Note the
	 * callback may fire once per latched error condition. */
	if (intstat & QM_SS_ADC_INTSTAT_OVERFLOW) {
		if (irq_xfer[adc].callback) {
			irq_xfer[adc].callback(irq_xfer[adc].callback_data,
					       -EIO, QM_SS_ADC_OVERFLOW,
					       QM_SS_ADC_TRANSFER);
		}
	}
	if (intstat & QM_SS_ADC_INTSTAT_UNDERFLOW) {
		if (irq_xfer[adc].callback) {
			irq_xfer[adc].callback(irq_xfer[adc].callback_data,
					       -EIO, QM_SS_ADC_UNDERFLOW,
					       QM_SS_ADC_TRANSFER);
		}
	}
	if (intstat & QM_SS_ADC_INTSTAT_SEQERROR) {
		if (irq_xfer[adc].callback) {
			irq_xfer[adc].callback(irq_xfer[adc].callback_data,
					       -EIO, QM_SS_ADC_SEQERROR,
					       QM_SS_ADC_TRANSFER);
		}
	}

	/* Clear all error interrupts. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 (QM_SS_ADC_CTRL_CLR_SEQERROR |
			  QM_SS_ADC_CTRL_CLR_OVERFLOW |
			  QM_SS_ADC_CTRL_CLR_UNDERFLOW));

	/* Disable the ADC. */
	disable_adc();
}
/*
 * Mode-change (power) ISR body for the SS ADC.
 *
 * Completes an asynchronous mode change started by qm_ss_adc_irq_set_mode:
 * performs a dummy conversion when entering a normal mode and invokes the
 * registered mode callback.
 */
static void qm_ss_adc_isr_pwr_handler(const qm_ss_adc_t adc)
{
	uint32_t controller = adc_base[adc];

	/* The IRQ associated with the mode change fires an interrupt as soon
	 * as it is enabled so it is necessary to ignore it the first time the
	 * ISR runs. */
	if (!first_mode_callback_ignored[adc]) {
		first_mode_callback_ignored[adc] = true;
		return;
	}

	/* Perform a dummy conversion if we are transitioning to Normal Mode. */
	if ((requested_mode[adc] >= QM_SS_ADC_MODE_NORM_CAL)) {
		dummy_conversion(controller);
	}

	/* Call the user callback if it is set. */
	if (mode_callback[adc]) {
		mode_callback[adc](mode_callback_data[adc], 0, QM_SS_ADC_IDLE,
				   QM_SS_ADC_MODE_CHANGED);
	}
}
/*
 * Calibration-complete ISR body for the SS ADC.
 *
 * Acknowledges the calibration request started by qm_ss_adc_irq_calibrate,
 * notifies the registered calibration callback and disables the ADC.
 */
static void qm_ss_adc_isr_cal_handler(const qm_ss_adc_t adc)
{
	/* Clear the calibration request reg. */
	QM_SS_REG_AUX_NAND(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL,
			   QM_SS_ADC_CAL_REQ);

	/* Call the user callback if it is set. */
	if (cal_callback[adc]) {
		cal_callback[adc](cal_callback_data[adc], 0, QM_SS_ADC_IDLE,
				  QM_SS_ADC_CAL_COMPLETE);
	}

	/* Disable the ADC. */
	disable_adc();
}
/* ISR for SS ADC 0 Data available. */
QM_ISR_DECLARE(qm_ss_adc_0_isr)
{
	qm_ss_adc_isr_handler(QM_SS_ADC_0);
}
/* ISR for SS ADC 0 Error. */
QM_ISR_DECLARE(qm_ss_adc_0_err_isr)
{
	qm_ss_adc_isr_err_handler(QM_SS_ADC_0);
}
/* ISR for SS ADC 0 Mode change. */
QM_ISR_DECLARE(qm_ss_adc_0_pwr_isr)
{
	qm_ss_adc_isr_pwr_handler(QM_SS_ADC_0);
}
/* ISR for SS ADC 0 Calibration. */
QM_ISR_DECLARE(qm_ss_adc_0_cal_isr)
{
	qm_ss_adc_isr_cal_handler(QM_SS_ADC_0);
}
/*
 * Populate the hardware sequence table for a transfer.
 *
 * @param[in] adc        Which ADC instance to program.
 * @param[in] xfer       Transfer descriptor (channel list and sample buffer).
 * @param[in] single_run True for a one-shot conversion: the channel list is
 *                       repeated until samples_len entries are programmed.
 */
static void setup_seq_table(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *xfer,
			    bool single_run)
{
	uint32_t i, reg, ch_odd, ch_even, seq_entry = 0;
	uint32_t num_channels, controller = adc_base[adc];

	/* The sample window is the time in cycles between the start of one
	 * sample and the start of the next. Resolution is indexed from 0 so we
	 * need to add 1 and a further 2 for the time it takes to process. */
	uint16_t delay = (sample_window[adc] - (resolution[adc] + 3));

	/* Reset the sequence table and sequence pointer. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_SEQ_TABLE_RST);

	/* If a single run is requested and the number of channels in ch is less
	 * than the number of samples requested we need to insert multiple
	 * channels into the sequence table. */
	num_channels = single_run ? xfer->samples_len : xfer->ch_len;

	/* The sequence table has to be populated with pairs of entries so there
	 * are sample_len/2 pairs of entries. These entries are read from the
	 * ch array in pairs. The same delay is used between all entries. */
	for (i = 0; i < (num_channels - 1); i += 2) {
		ch_odd = xfer->ch[(i + 1) % xfer->ch_len];
		ch_even = xfer->ch[i % xfer->ch_len];
		seq_entry =
		    ((delay << QM_SS_ADC_SEQ_DELAYODD_OFFSET) |
		     (ch_odd << QM_SS_ADC_SEQ_MUXODD_OFFSET) |
		     (delay << QM_SS_ADC_SEQ_DELAYEVEN_OFFSET) | ch_even);
		__builtin_arc_sr(seq_entry, controller + QM_SS_ADC_SEQ);
	}

	/* If there is an uneven number of entries we need to create a final
	 * pair with a single entry. */
	if (num_channels % 2) {
		ch_even = xfer->ch[i % xfer->ch_len];
		seq_entry =
		    ((delay << QM_SS_ADC_SEQ_DELAYEVEN_OFFSET) | (ch_even));
		__builtin_arc_sr(seq_entry, controller + QM_SS_ADC_SEQ);
	}

	/* Reset the sequence pointer back to 0. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_SEQ_PTR_RST);

	/* Set the number of entries in the sequencer. */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_SEQ_ENTRIES_MASK;
	reg |= ((num_channels - 1) << QM_SS_ADC_SET_SEQ_ENTRIES_OFFSET);
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);
}
/*
 * Perform one throw-away conversion on the dummy sequence entry.
 *
 * Used after a transition to a normal power mode: the result is discarded
 * (the FIFO is flushed) and the controller is left disabled with all
 * interrupts unmasked.
 *
 * Fix: the original declared `int res` solely to hold the INTSTAT value
 * tested in the busy-wait; the value was never read afterwards
 * (set-but-unused warning), so the variable is removed and the register is
 * polled directly.
 *
 * @param[in] controller Base address of the ADC register block.
 */
static void dummy_conversion(uint32_t controller)
{
	uint32_t reg;

	/* Flush the FIFO. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_FLUSH_RX);

	/* Set up sequence table. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_SEQ_TABLE_RST);

	/* Populate the seq table with the single dummy entry. */
	__builtin_arc_sr(QM_SS_ADC_SEQ_DUMMY, controller + QM_SS_ADC_SEQ);

	/* Reset the sequence pointer back to 0. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_SEQ_PTR_RST);

	/* Set the number of entries in the sequencer to 1 (field is 0-based).
	 */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_SEQ_ENTRIES_MASK;
	reg |= (0 << QM_SS_ADC_SET_SEQ_ENTRIES_OFFSET);
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);

	/* Set the threshold so one sample raises the data-available flag. */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_THRESHOLD_MASK;
	reg |= (0 << QM_SS_ADC_SET_THRESHOLD_OFFSET);
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);

	/* Set the sequence mode to single run. */
	QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_SEQ_MODE);

	/* Clear all interrupts. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_CLR_ALL_INT);
	/* Mask all interrupts: completion is detected by polling below. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_MSK_ALL_INT);

	/* Enable the ADC. */
	enable_adc();

	/* Start the sequencer. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL, QM_SS_ADC_CTRL_SEQ_START);

	/* Wait for the sequence to finish (any INTSTAT bit set). */
	while (!__builtin_arc_lr(controller + QM_SS_ADC_INTSTAT)) {
	}

	/* Flush the FIFO: the dummy sample is discarded. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_FLUSH_RX);

	/* Clear the data available status register. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_CLR_DATA_A);

	/* Unmask all interrupts. */
	QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_CTRL,
			   QM_SS_ADC_CTRL_MSK_ALL_INT);

	/* Disable the ADC. */
	disable_adc();
}
/*
 * Configure the sample window and resolution of an SS ADC instance.
 *
 * Fix: the original initialized `controller = adc_base[adc]` in the
 * declaration, i.e. BEFORE `QM_CHECK(adc < QM_SS_ADC_NUM)` validated the
 * index — an out-of-bounds read when called with an invalid instance. The
 * array is now indexed only after validation.
 *
 * @param[in] adc Which ADC instance to configure.
 * @param[in] cfg Configuration; window must exceed resolution+2 cycles.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_set_config(const qm_ss_adc_t adc,
			 const qm_ss_adc_config_t *const cfg)
{
	uint32_t reg;
	uint32_t controller;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != cfg, -EINVAL);
	QM_CHECK(cfg->resolution <= QM_SS_ADC_RES_12_BITS, -EINVAL);
	/* The window must be 2 greater than the resolution but since this is
	 * indexed from 0 we need to add a further 1. */
	QM_CHECK(cfg->window >= (cfg->resolution + 3), -EINVAL);

	/* Safe to index only after adc has been validated above. */
	controller = adc_base[adc];

	/* Record the sample window and resolution for later transfers. */
	sample_window[adc] = cfg->window;
	resolution[adc] = cfg->resolution;

	/* Program the resolution (sample width field). */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_SAMPLE_WIDTH_MASK;
	reg |= resolution[adc];
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);

	return 0;
}
/*
 * Synchronously change the SS ADC power mode.
 *
 * Issues the mode-change command through the creg master control register,
 * busy-waits for the status bit, then performs a dummy conversion when
 * entering a normal mode.
 *
 * NOTE(review): `controller = adc_base[adc]` is evaluated before the
 * QM_CHECK bounds test — out-of-bounds read if adc is invalid; confirm and
 * reorder as done in qm_ss_adc_set_config.
 *
 * @param[in] adc  Which ADC instance.
 * @param[in] mode Requested power mode.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode)
{
	uint32_t creg, delay;
	uint32_t controller = adc_base[adc];

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(mode <= QM_SS_ADC_MODE_NORM_NO_CAL, -EINVAL);

	/* Calculate the delay. */
	delay = CALCULATE_DELAY();

	/* Issue mode change command and wait for it to complete. */
	creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
	creg &= ~((QM_SS_ADC_DELAY_MASK << QM_SS_ADC_DELAY_OFFSET) |
		  QM_SS_ADC_PWR_MODE_MASK);
	creg |= ((delay << QM_SS_ADC_DELAY_OFFSET) | mode);
	__builtin_arc_sr(creg, QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);

	/* Wait for the mode change to complete. */
	while (!(__builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_SLV0_OBSR) &
		 QM_SS_ADC_PWR_MODE_STS)) {
	}

	/* Perform a dummy conversion if transitioning to Normal Mode. */
	if ((mode >= QM_SS_ADC_MODE_NORM_CAL)) {
		dummy_conversion(controller);
	}

	return 0;
}
/*
 * Asynchronously change the SS ADC power mode.
 *
 * Registers the callback, records the requested mode for the power ISR
 * (qm_ss_adc_isr_pwr_handler) and issues the mode-change command; completion
 * is signalled via the mode-change interrupt.
 *
 * @param[in] adc           Which ADC instance.
 * @param[in] mode          Requested power mode.
 * @param[in] callback      Invoked from the ISR when the change completes
 *                          (may be NULL).
 * @param[in] callback_data Opaque pointer passed back to the callback.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_irq_set_mode(const qm_ss_adc_t adc, const qm_ss_adc_mode_t mode,
			   void (*callback)(void *data, int error,
					    qm_ss_adc_status_t status,
					    qm_ss_adc_cb_source_t source),
			   void *callback_data)
{
	uint32_t creg, delay;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(mode <= QM_SS_ADC_MODE_NORM_NO_CAL, -EINVAL);

	mode_callback[adc] = callback;
	mode_callback_data[adc] = callback_data;
	requested_mode[adc] = mode;

	/* Calculate the delay. */
	delay = CALCULATE_DELAY();

	/* Issue mode change command; completion is reported by interrupt. */
	creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
	creg &= ~((QM_SS_ADC_DELAY_MASK << QM_SS_ADC_DELAY_OFFSET) |
		  QM_SS_ADC_PWR_MODE_MASK);
	creg |= ((delay << QM_SS_ADC_DELAY_OFFSET) | mode);
	__builtin_arc_sr(creg, QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);

	return 0;
}
/*
 * Synchronously run the SS ADC calibration command.
 *
 * Issues the start-calibrate command via the creg master control register,
 * busy-waits for the ACK bit, clears the request and disables the ADC.
 *
 * NOTE(review): the parameter is tagged __attribute__((unused)) yet IS read
 * by QM_CHECK — presumably the attribute silences the warning in builds
 * where QM_CHECK compiles to nothing; confirm against qm_common.h.
 *
 * @param[in] adc Which ADC instance.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_calibrate(const qm_ss_adc_t adc __attribute__((unused)))
{
	uint32_t creg;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);

	/* Enable the ADC. */
	enable_adc();

	/* Issue the start calibrate command. */
	creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
	creg &= ~(QM_SS_ADC_CAL_CMD_MASK | QM_SS_ADC_CAL_REQ);
	creg |= ((QM_SS_ADC_CMD_START_CAL << QM_SS_ADC_CAL_CMD_OFFSET) |
		 QM_SS_ADC_CAL_REQ);
	__builtin_arc_sr(creg, QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);

	/* Wait for the calibrate command to complete. */
	while (!(__builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_SLV0_OBSR) &
		 QM_SS_ADC_CAL_ACK)) {
	}

	/* Clear the calibration request reg. */
	QM_SS_REG_AUX_NAND(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL,
			   QM_SS_ADC_CAL_REQ);

	/* Disable the ADC. */
	disable_adc();

	return 0;
}
/*
 * Asynchronously run the SS ADC calibration command.
 *
 * Registers the callback for qm_ss_adc_isr_cal_handler and issues the
 * start-calibrate command; completion is signalled via the calibration
 * interrupt, which also clears the request and disables the ADC.
 *
 * @param[in] adc           Which ADC instance.
 * @param[in] callback      Invoked from the ISR on completion (may be NULL).
 * @param[in] callback_data Opaque pointer passed back to the callback.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_irq_calibrate(const qm_ss_adc_t adc,
			    void (*callback)(void *data, int error,
					     qm_ss_adc_status_t status,
					     qm_ss_adc_cb_source_t source),
			    void *callback_data)
{
	uint32_t creg;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);

	cal_callback[adc] = callback;
	cal_callback_data[adc] = callback_data;

	/* Enable the ADC. */
	enable_adc();

	/* Issue the start calibrate command. */
	creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
	creg &= ~(QM_SS_ADC_CAL_CMD_MASK | QM_SS_ADC_CAL_REQ);
	creg |= ((QM_SS_ADC_CMD_START_CAL << QM_SS_ADC_CAL_CMD_OFFSET) |
		 QM_SS_ADC_CAL_REQ);
	__builtin_arc_sr(creg, QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);

	return 0;
}
/*
 * Load a previously obtained calibration value into the SS ADC.
 *
 * Issues the load-calibrate command with the supplied value, busy-waits for
 * the ACK bit and clears the request.
 *
 * @param[in] adc      Which ADC instance (read only by QM_CHECK).
 * @param[in] cal_data Calibration value, at most QM_SS_ADC_CAL_MAX.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_set_calibration(const qm_ss_adc_t adc __attribute__((unused)),
			      const qm_ss_adc_calibration_t cal_data)
{
	uint32_t creg;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(cal_data <= QM_SS_ADC_CAL_MAX, -EINVAL);

	/* Issue the load calibrate command. */
	creg = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);
	creg &= ~(QM_SS_ADC_CAL_VAL_SET_MASK | QM_SS_ADC_CAL_CMD_MASK |
		  QM_SS_ADC_CAL_REQ);
	creg |= ((cal_data << QM_SS_ADC_CAL_VAL_SET_OFFSET) |
		 (QM_SS_ADC_CMD_LOAD_CAL << QM_SS_ADC_CAL_CMD_OFFSET) |
		 QM_SS_ADC_CAL_REQ);
	__builtin_arc_sr(creg, QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL);

	/* Wait for the calibrate command to complete. */
	while (!(__builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_SLV0_OBSR) &
		 QM_SS_ADC_CAL_ACK)) {
	}

	/* Clear the calibration request reg. */
	QM_SS_REG_AUX_NAND(QM_SS_CREG_BASE + QM_SS_IO_CREG_MST0_CTRL,
			   QM_SS_ADC_CAL_REQ);

	return 0;
}
/*
 * Read back the current calibration value from the observation register.
 *
 * @param[in]  adc Which ADC instance (read only by QM_CHECK).
 * @param[out] cal Receives the calibration value; must not be NULL.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_get_calibration(const qm_ss_adc_t adc __attribute__((unused)),
			      qm_ss_adc_calibration_t *const cal)
{
	uint32_t obsr;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != cal, -EINVAL);

	/* Extract the calibration field from the slave observation reg. */
	obsr = __builtin_arc_lr(QM_SS_CREG_BASE + QM_SS_IO_CREG_SLV0_OBSR);
	*cal = (obsr & QM_SS_ADC_CAL_VAL_GET_MASK) >>
	       QM_SS_ADC_CAL_VAL_GET_OFFSET;

	return 0;
}
/*
 * Polled (blocking) conversion on the SS ADC.
 *
 * Programs the sequence table from xfer, runs the sequencer in single-run
 * mode with all interrupts masked, busy-waits on INTSTAT, then pops every
 * sample out of the FIFO into xfer->samples. At most QM_SS_ADC_FIFO_LEN
 * samples may be requested since the whole result must fit in the FIFO.
 *
 * @param[in]     adc  Which ADC instance.
 * @param[in,out] xfer Transfer descriptor; samples[] receives the results.
 *
 * @return 0 on success, -EINVAL on invalid parameter, -EIO on a FIFO
 *         over/underflow or sequencer error.
 */
int qm_ss_adc_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *xfer)
{
	uint32_t reg, i;
	uint32_t controller = adc_base[adc];
	int res;

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != xfer, -EINVAL);
	QM_CHECK(NULL != xfer->ch, -EINVAL);
	QM_CHECK(NULL != xfer->samples, -EINVAL);
	QM_CHECK(xfer->ch_len > 0, -EINVAL);
	QM_CHECK(xfer->ch_len <= QM_SS_ADC_CHAN_SEQ_MAX, -EINVAL);
	QM_CHECK(xfer->samples_len > 0, -EINVAL);
	QM_CHECK(xfer->samples_len <= QM_SS_ADC_FIFO_LEN, -EINVAL);

	/* Flush the FIFO. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_FLUSH_RX);

	/* Populate the sequence table. */
	setup_seq_table(adc, xfer, true);

	/* Set the threshold so the flag raises only when all samples are in
	 * the FIFO (field is 0-based). */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_THRESHOLD_MASK;
	reg |= ((xfer->samples_len - 1) << QM_SS_ADC_SET_THRESHOLD_OFFSET);
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);

	/* Set the sequence mode to single run. */
	QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_SEQ_MODE);

	/* Mask all interrupts: completion is detected by polling below. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_MSK_ALL_INT);

	/* Enable the ADC. */
	enable_adc();

	/* Start the sequencer. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL, QM_SS_ADC_CTRL_SEQ_START);

	/* Wait for the sequence to finish. */
	while (!(res = __builtin_arc_lr(controller + QM_SS_ADC_INTSTAT))) {
	}

	/* Return if we get an error (UNDERFLOW, OVERFLOW, SEQ_ERROR): any
	 * INTSTAT bit above the data-available bit indicates a fault. */
	if (res > 1) {
		return -EIO;
	}

	/* Read the samples into the array. */
	for (i = 0; i < xfer->samples_len; i++) {
		/* Pop one sample into the sample register. */
		QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET,
				 QM_SS_ADC_SET_POP_RX);
		/* Read the sample in the array; shifted down according to the
		 * configured resolution. */
		xfer->samples[i] =
		    (__builtin_arc_lr(controller + QM_SS_ADC_SAMPLE) >>
		     (ADC_SAMPLE_SHIFT - resolution[adc]));
	}

	/* Clear the data available status register. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_CLR_DATA_A);

	/* Disable the ADC. */
	disable_adc();

	return 0;
}
/*
 * Interrupt-driven conversion on the SS ADC.
 *
 * Programs the sequence table, copies the transfer descriptor for the ISR,
 * sets the FIFO threshold and starts the sequencer in repetitive mode; the
 * data-available ISR (qm_ss_adc_isr_handler) drains the FIFO and completes
 * the transfer.
 *
 * @param[in] adc  Which ADC instance.
 * @param[in] xfer Transfer descriptor; must remain valid until completion.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_adc_irq_convert(const qm_ss_adc_t adc, qm_ss_adc_xfer_t *xfer)
{
	uint32_t reg;
	uint32_t controller = adc_base[adc];

	QM_CHECK(adc < QM_SS_ADC_NUM, -EINVAL);
	QM_CHECK(NULL != xfer, -EINVAL);
	QM_CHECK(NULL != xfer->ch, -EINVAL);
	QM_CHECK(NULL != xfer->samples, -EINVAL);
	QM_CHECK(xfer->ch_len > 0, -EINVAL);
	QM_CHECK(xfer->samples_len > 0, -EINVAL);
	QM_CHECK(xfer->ch_len <= QM_SS_ADC_CHAN_SEQ_MAX, -EINVAL);

	/* Flush the FIFO. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_FLUSH_RX);

	/* Populate the sequence table (repetitive mode: the channel list is
	 * cycled by hardware). */
	setup_seq_table(adc, xfer, false);

	/* Copy the xfer struct so we can get access from the ISR. */
	memcpy(&irq_xfer[adc], xfer, sizeof(qm_ss_adc_xfer_t));

	/* Set count back to 0. */
	count[adc] = 0;

	/* Set the threshold (field is 0-based). */
	reg = __builtin_arc_lr(controller + QM_SS_ADC_SET);
	reg &= ~QM_SS_ADC_SET_THRESHOLD_MASK;
	reg |= (FIFO_INTERRUPT_THRESHOLD - 1) << QM_SS_ADC_SET_THRESHOLD_OFFSET;
	__builtin_arc_sr(reg, controller + QM_SS_ADC_SET);

	/* Set the sequence mode to repetitive. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_SET, QM_SS_ADC_SET_SEQ_MODE);

	/* Enable all interrupts by clearing the mask bits.
	 * NOTE(review): 0x1F00 is a magic constant — presumably equal to
	 * QM_SS_ADC_CTRL_MSK_ALL_INT; confirm against qm_sensor_regs.h and
	 * replace with the named macro. */
	QM_SS_REG_AUX_NAND(controller + QM_SS_ADC_CTRL, 0x1F00);

	/* Enable the ADC. */
	enable_adc();

	/* Start the sequencer. */
	QM_SS_REG_AUX_OR(controller + QM_SS_ADC_CTRL, QM_SS_ADC_CTRL_SEQ_START);

	return 0;
}

View file

@ -0,0 +1,158 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_ss_gpio.h"
static void (*callback[QM_SS_GPIO_NUM])(void *data, uint32_t int_status);
static void *callback_data[QM_SS_GPIO_NUM];
static uint32_t gpio_base[QM_SS_GPIO_NUM] = {QM_SS_GPIO_0_BASE,
QM_SS_GPIO_1_BASE};
/*
 * Shared ISR body for both SS GPIO controllers: read the pending interrupt
 * status, notify the registered user callback, then acknowledge (EOI) the
 * serviced pins.
 *
 * Fix: the original passed the `callback_data` ARRAY (a void**) to the user
 * callback instead of the per-controller context registered in
 * qm_ss_gpio_set_config — every callback received a wrong pointer. Now the
 * entry for this controller, callback_data[gpio], is passed.
 */
static void ss_gpio_isr_handler(qm_ss_gpio_t gpio)
{
	uint32_t int_status = 0;
	uint32_t controller = gpio_base[gpio];

	int_status = __builtin_arc_lr(controller + QM_SS_GPIO_INTSTATUS);

	if (callback[gpio]) {
		callback[gpio](callback_data[gpio], int_status);
	}

	/* Acknowledge exactly the pins whose status was reported above. */
	__builtin_arc_sr(int_status, controller + QM_SS_GPIO_PORTA_EOI);
}
/* ISR trampoline for SS GPIO controller 0. */
QM_ISR_DECLARE(qm_ss_gpio_isr_0)
{
	ss_gpio_isr_handler(QM_SS_GPIO_0);
}
/* ISR trampoline for SS GPIO controller 1. */
QM_ISR_DECLARE(qm_ss_gpio_isr_1)
{
	ss_gpio_isr_handler(QM_SS_GPIO_1);
}
/*
 * Configure an SS GPIO port: direction, interrupt type/polarity/debounce,
 * user callback, and per-pin interrupt enables.
 *
 * @param[in] gpio Which GPIO controller.
 * @param[in] cfg  Port configuration; must not be NULL.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_set_config(const qm_ss_gpio_t gpio,
			  const qm_ss_gpio_port_config_t *const cfg)
{
	uint32_t controller;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(cfg != NULL, -EINVAL);

	controller = gpio_base[gpio];

	/* Mask every pin while the port is being reconfigured. */
	__builtin_arc_sr(0xFFFFFFFF, controller + QM_SS_GPIO_INTMASK);

	__builtin_arc_sr(cfg->direction, controller + QM_SS_GPIO_SWPORTA_DDR);
	__builtin_arc_sr(cfg->int_type, controller + QM_SS_GPIO_INTTYPE_LEVEL);
	__builtin_arc_sr(cfg->int_polarity,
			 controller + QM_SS_GPIO_INT_POLARITY);
	__builtin_arc_sr(cfg->int_debounce, controller + QM_SS_GPIO_DEBOUNCE);

	callback[gpio] = cfg->callback;
	callback_data[gpio] = cfg->callback_data;

	__builtin_arc_sr(cfg->int_en, controller + QM_SS_GPIO_INTEN);
	/* Unmask only the pins with interrupts enabled. */
	__builtin_arc_sr(~cfg->int_en, controller + QM_SS_GPIO_INTMASK);

	return 0;
}
/*
 * Read the current level of a single GPIO pin from the external port reg.
 *
 * NOTE(review): the bound check is `pin <= QM_SS_GPIO_NUM_PINS` — if
 * NUM_PINS is a count (not a max index) this is off by one and allows a
 * shift equal to the register width; confirm the macro's meaning.
 *
 * @param[in]  gpio  Which GPIO controller.
 * @param[in]  pin   Pin index within the port.
 * @param[out] state Receives the pin level; must not be NULL.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_read_pin(const qm_ss_gpio_t gpio, const uint8_t pin,
			qm_ss_gpio_state_t *const state)
{
	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(pin <= QM_SS_GPIO_NUM_PINS, -EINVAL);
	QM_CHECK(state != NULL, -EINVAL);

	*state =
	    ((__builtin_arc_lr(gpio_base[gpio] + QM_SS_GPIO_EXT_PORTA) >> pin) &
	     1);

	return 0;
}
/*
 * Drive a single GPIO pin high (read-modify-write of the data register).
 *
 * NOTE(review): bound check `pin <= QM_SS_GPIO_NUM_PINS` may be off by one;
 * see qm_ss_gpio_read_pin.
 *
 * @param[in] gpio Which GPIO controller.
 * @param[in] pin  Pin index within the port.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_set_pin(const qm_ss_gpio_t gpio, const uint8_t pin)
{
	uint32_t val;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(pin <= QM_SS_GPIO_NUM_PINS, -EINVAL);

	val = __builtin_arc_lr(gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR) |
	      BIT(pin);
	__builtin_arc_sr(val, gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR);

	return 0;
}
/*
 * Drive a single GPIO pin low (read-modify-write of the data register).
 *
 * @param[in] gpio Which GPIO controller.
 * @param[in] pin  Pin index within the port.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_clear_pin(const qm_ss_gpio_t gpio, const uint8_t pin)
{
	uint32_t port_val;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(pin <= QM_SS_GPIO_NUM_PINS, -EINVAL);

	/* Read the data register, drop the target bit, write it back. */
	port_val = __builtin_arc_lr(gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR) &
		   ~BIT(pin);
	__builtin_arc_sr(port_val, gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR);

	return 0;
}
int qm_ss_gpio_set_pin_state(const qm_ss_gpio_t gpio, const uint8_t pin,
const qm_ss_gpio_state_t state)
{
uint32_t val;
QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
QM_CHECK(state < QM_SS_GPIO_STATE_NUM, -EINVAL);
val = __builtin_arc_lr(gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR);
val ^= (-state ^ val) & (1 << pin);
__builtin_arc_sr(val, gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR);
return 0;
}
/*
 * Read the level of every pin on the port in one register access.
 *
 * @param[in]  gpio Which GPIO controller.
 * @param[out] port Receives the external port register value; not NULL.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_read_port(const qm_ss_gpio_t gpio, uint32_t *const port)
{
	uint32_t controller;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);
	QM_CHECK(port != NULL, -EINVAL);

	controller = gpio_base[gpio];
	*port = __builtin_arc_lr(controller + QM_SS_GPIO_EXT_PORTA);

	return 0;
}
/*
 * Write the whole port data register in one access.
 *
 * @param[in] gpio Which GPIO controller.
 * @param[in] val  Value written to the data register.
 *
 * @return 0 on success, -EINVAL on invalid parameter.
 */
int qm_ss_gpio_write_port(const qm_ss_gpio_t gpio, const uint32_t val)
{
	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);

	__builtin_arc_sr(val, gpio_base[gpio] + QM_SS_GPIO_SWPORTA_DR);

	return 0;
}

View file

@ -0,0 +1,681 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define SPK_LEN_SS (1)
#define SPK_LEN_FS (2)
#define TX_TL (2)
#define RX_TL (5)
#include <string.h>
#include "qm_ss_i2c.h"
#include "clk.h"
/*
* NOTE: There are a number of differences between this Sensor Subsystem I2C
* driver and the Lakemont version. The IP is not the same, the
* functionality is a subset of the features contained on the Lakemont
* version:
* 1. Fast Mode Plus is not supported
* 2. Slave mode is not supported
*
* The registers are different and the register set is compressed.
* Some noteworthy differences are:
* 1. Clock enable is contained in the QM_SS_I2C_CON register
* 2. SPKLEN is contained in the QM_SS_I2C_CON register
* 3. The high and low count values are contained within a single
* register
* 4. There is no raw interrupt status register, QM_SS_I2C_INT_STAT
* takes its place and is non-maskable
* 5. There is a reduced number of TX abrt source status bits
* 6. The QM_SS_I2C_DATA_CMD register is different and requires the
* strobe bit to be written to indicate a QM_SS_I2C_DATA_CMD
* register update. There is a push and pop mechanism for using
* the FIFO.
*/
static uint32_t i2c_base[QM_SS_I2C_NUM] = {QM_SS_I2C_0_BASE, QM_SS_I2C_1_BASE};
static qm_ss_i2c_transfer_t i2c_transfer[QM_SS_I2C_NUM];
static uint32_t i2c_write_pos[QM_SS_I2C_NUM], i2c_read_pos[QM_SS_I2C_NUM],
i2c_read_buffer_remaining[QM_SS_I2C_NUM];
static void controller_enable(const qm_ss_i2c_t i2c);
static void controller_disable(const qm_ss_i2c_t i2c);
/*
 * Shared ISR body for both SS I2C controllers.
 *
 * Handles, in order: TX-abort reporting, RX FIFO draining (with dynamic RX
 * threshold adjustment near the end of a read), TX FIFO refilling, queuing
 * of read commands for combined transfers, and end-of-transfer callback
 * dispatch. Statement order is significant throughout; do not reorder.
 */
static void qm_ss_i2c_isr_handler(const qm_ss_i2c_t i2c)
{
	uint32_t controller = i2c_base[i2c], data_cmd = 0,
		 count_tx = (QM_SS_I2C_FIFO_SIZE - TX_TL);
	qm_ss_i2c_status_t status = 0;
	int rc = 0;

	/* Check for errors */
	QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
		    QM_SS_I2C_INTR_STAT_TX_OVER));
	QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
		    QM_SS_I2C_INTR_STAT_RX_UNDER));
	QM_ASSERT(!(__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
		    QM_SS_I2C_INTR_STAT_RX_OVER));

	if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
	     QM_SS_I2C_INTR_STAT_TX_ABRT)) {
		QM_ASSERT(
		    !(__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
		      QM_SS_I2C_TX_ABRT_SBYTE_NORSTRT));
		status =
		    (__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
		     QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK);

		/* clear intr */
		__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
				 controller + QM_SS_I2C_INTR_CLR);

		/* mask interrupts */
		__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
				 controller + QM_SS_I2C_INTR_MASK);

		/* User-initiated aborts report -ECANCELED, bus faults -EIO. */
		rc = (status & QM_I2C_TX_ABRT_USER_ABRT) ? -ECANCELED : -EIO;

		if (i2c_transfer[i2c].callback) {
			i2c_transfer[i2c].callback(
			    i2c_transfer[i2c].callback_data, rc, status, 0);
		}
	}

	/* RX read from buffer */
	if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
	     QM_SS_I2C_INTR_STAT_RX_FULL)) {

		while (i2c_read_buffer_remaining[i2c] &&
		       (__builtin_arc_lr(controller + QM_SS_I2C_RXFLR))) {
			/* Pop a byte off the FIFO before reading it. */
			__builtin_arc_sr(QM_SS_I2C_DATA_CMD_POP,
					 controller + QM_SS_I2C_DATA_CMD);
			/* IC_DATA_CMD[7:0] contains received data */
			i2c_transfer[i2c].rx[i2c_read_pos[i2c]] =
			    __builtin_arc_lr(controller + QM_SS_I2C_DATA_CMD);
			i2c_read_buffer_remaining[i2c]--;
			i2c_read_pos[i2c]++;

			if (i2c_read_buffer_remaining[i2c] == 0) {
				/* mask rx full interrupt if transfer
				 * complete
				 */
				QM_SS_REG_AUX_NAND(
				    (controller + QM_SS_I2C_INTR_MASK),
				    QM_SS_I2C_INTR_MASK_RX_FULL);

				if (i2c_transfer[i2c].stop) {
					controller_disable(i2c);
				}

				if (i2c_transfer[i2c].callback) {
					i2c_transfer[i2c].callback(
					    i2c_transfer[i2c].callback_data, 0,
					    QM_I2C_IDLE, i2c_read_pos[i2c]);
				}
			}
		}

		if (i2c_read_buffer_remaining[i2c] > 0 &&
		    i2c_read_buffer_remaining[i2c] < (RX_TL + 1)) {
			/* Adjust the RX threshold so the next 'RX_FULL'
			 * interrupt is generated when all the remaining
			 * data are received.
			 */
			QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_TL),
					   QM_SS_I2C_TL_RX_TL_MASK);
			QM_SS_REG_AUX_OR((controller + QM_SS_I2C_TL),
					 (i2c_read_buffer_remaining[i2c] - 1));
		}

		/* RX_FULL INTR is autocleared when the buffer
		 * levels goes below the threshold
		 */
	}

	if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
	     QM_SS_I2C_INTR_STAT_TX_EMPTY)) {

		if ((__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
		     QM_SS_I2C_STATUS_TFE) &&
		    (i2c_transfer[i2c].tx != NULL) &&
		    (i2c_transfer[i2c].tx_len == 0) &&
		    (i2c_transfer[i2c].rx_len == 0)) {

			QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_INTR_MASK),
					   QM_SS_I2C_INTR_MASK_TX_EMPTY);

			/* if this is not a combined
			 * transaction, disable the controller now
			 */
			if ((i2c_read_buffer_remaining[i2c] == 0) &&
			    i2c_transfer[i2c].stop) {
				controller_disable(i2c);

				/* callback */
				if (i2c_transfer[i2c].callback) {
					i2c_transfer[i2c].callback(
					    i2c_transfer[i2c].callback_data, 0,
					    QM_I2C_IDLE, i2c_write_pos[i2c]);
				}
			}
		}

		/* Refill the TX FIFO with pending write data. */
		while ((count_tx) && i2c_transfer[i2c].tx_len) {
			count_tx--;

			/* write command -IC_DATA_CMD[8] = 0 */
			/* fill IC_DATA_CMD[7:0] with the data */
			data_cmd = QM_SS_I2C_DATA_CMD_PUSH |
				   i2c_transfer[i2c].tx[i2c_write_pos[i2c]];
			i2c_transfer[i2c].tx_len--;

			/* if transfer is a combined transfer, only
			 * send stop at
			 * end of the transfer sequence */
			if (i2c_transfer[i2c].stop &&
			    (i2c_transfer[i2c].tx_len == 0) &&
			    (i2c_transfer[i2c].rx_len == 0)) {
				data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
			}

			/* write data */
			__builtin_arc_sr(data_cmd,
					 controller + QM_SS_I2C_DATA_CMD);
			i2c_write_pos[i2c]++;

			/* TX_EMPTY INTR is autocleared when the buffer
			 * levels goes above the threshold
			 */
		}

		/* TX read command: queue read requests once all write data
		 * has been sent, limited by the free FIFO space. */
		count_tx =
		    QM_SS_I2C_FIFO_SIZE -
		    (__builtin_arc_lr(controller + QM_SS_I2C_TXFLR) +
		     (__builtin_arc_lr(controller + QM_SS_I2C_RXFLR) + 1));

		while (i2c_transfer[i2c].rx_len &&
		       (i2c_transfer[i2c].tx_len == 0) && count_tx) {
			count_tx--;
			i2c_transfer[i2c].rx_len--;

			/* if transfer is a combined transfer, only
			 * send stop at
			 * end of
			 * the transfer sequence */
			if (i2c_transfer[i2c].stop &&
			    (i2c_transfer[i2c].rx_len == 0) &&
			    (i2c_transfer[i2c].tx_len == 0)) {
				__builtin_arc_sr((QM_SS_I2C_DATA_CMD_CMD |
						  QM_SS_I2C_DATA_CMD_PUSH |
						  QM_SS_I2C_DATA_CMD_STOP),
						 controller +
						     QM_SS_I2C_DATA_CMD);
			} else {
				__builtin_arc_sr((QM_SS_I2C_DATA_CMD_CMD |
						  QM_SS_I2C_DATA_CMD_PUSH),
						 controller +
						     QM_SS_I2C_DATA_CMD);
			}
		}

		/* generate a tx_empty interrupt when tx fifo is fully
		 * empty */
		if ((i2c_transfer[i2c].tx_len == 0) &&
		    (i2c_transfer[i2c].rx_len == 0)) {
			QM_SS_REG_AUX_NAND((controller + QM_SS_I2C_TL),
					   QM_SS_I2C_TL_TX_TL_MASK);
		}
	}
}
/* ISR trampoline for SS I2C controller 0. */
QM_ISR_DECLARE(qm_ss_i2c_isr_0)
{
	qm_ss_i2c_isr_handler(QM_SS_I2C_0);
}
/* ISR trampoline for SS I2C controller 1. */
QM_ISR_DECLARE(qm_ss_i2c_isr_1)
{
	qm_ss_i2c_isr_handler(QM_SS_I2C_1);
}
/*
 * Convert a desired SCL low period (in nanoseconds) into an LCNT register
 * value, based on the current system clock frequency.
 */
static uint32_t get_lo_cnt(uint32_t lo_time_ns)
{
	uint32_t ticks = (clk_sys_get_ticks_per_us() * lo_time_ns) / 1000;

	/* The hardware counts LCNT + 1 clock cycles, hence the -1. */
	return ticks - 1;
}
/*
 * Convert a desired SCL high period (in nanoseconds) into an HCNT register
 * value, compensating for the controller's fixed latency (7 cycles) and the
 * configured spike-suppression length.
 */
static uint32_t get_hi_cnt(qm_ss_i2c_t i2c, uint32_t hi_time_ns)
{
	uint32_t controller = i2c_base[i2c];
	uint32_t ticks = (clk_sys_get_ticks_per_us() * hi_time_ns) / 1000;
	uint32_t spklen = (__builtin_arc_lr(controller + QM_SS_I2C_CON) &
			   QM_SS_I2C_CON_SPKLEN_MASK) >>
			  QM_SS_I2C_CON_SPKLEN_OFFSET;

	return ticks - 7 - spklen;
}
/*
 * Configure the I2C controller: address mode, bus speed and the SCL
 * high/low counts derived from the system clock.
 *
 * Returns 0 on success, -EINVAL on bad arguments or if the computed
 * counts fall outside the hardware limits.
 */
int qm_ss_i2c_set_config(const qm_ss_i2c_t i2c,
			 const qm_ss_i2c_config_t *const cfg)
{
	uint32_t controller, con, lcnt = 0, hcnt = 0, full_cnt = 0,
		 min_lcnt = 0, lcnt_diff = 0;

	/* Validate arguments before indexing i2c_base[] or touching any
	 * register (the original read registers before checking 'i2c'). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(cfg != NULL, -EINVAL);

	controller = i2c_base[i2c];
	/* Preserve only the clock-enable bit; the rest is rebuilt below. */
	con = (__builtin_arc_lr(controller + QM_SS_I2C_CON) &
	       QM_SS_I2C_CON_CLK_ENA);

	/* mask all interrupts */
	__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
			 controller + QM_SS_I2C_INTR_MASK);

	/* disable controller */
	controller_disable(i2c);

	/* set mode */
	con |= QM_SS_I2C_CON_RESTART_EN |
	       /* set 7/10 bit address mode */
	       (cfg->address_mode << QM_SS_I2C_CON_IC_10BITADDR_OFFSET);

	/*
	 * Timing generation algorithm:
	 * 1. compute hi/lo count so as to achieve the desired bus
	 *    speed at 50% duty cycle
	 * 2. adjust the hi/lo count to ensure that minimum hi/lo
	 *    timings are guaranteed as per spec.
	 */
	switch (cfg->speed) {
	case QM_SS_I2C_SPEED_STD:
		con |= QM_SS_I2C_CON_SPEED_SS |
		       SPK_LEN_SS << QM_SS_I2C_CON_SPKLEN_OFFSET;
		__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
		min_lcnt = get_lo_cnt(QM_I2C_MIN_SS_NS);
		lcnt = get_lo_cnt(QM_I2C_SS_50_DC_NS);
		hcnt = get_hi_cnt(i2c, QM_I2C_SS_50_DC_NS);
		break;

	case QM_SS_I2C_SPEED_FAST:
		con |= QM_SS_I2C_CON_SPEED_FS |
		       SPK_LEN_FS << QM_SS_I2C_CON_SPKLEN_OFFSET;
		__builtin_arc_sr(con, controller + QM_SS_I2C_CON);
		min_lcnt = get_lo_cnt(QM_I2C_MIN_FS_NS);
		lcnt = get_lo_cnt(QM_I2C_FS_50_DC_NS);
		hcnt = get_hi_cnt(i2c, QM_I2C_FS_50_DC_NS);
		break;
	}

	/* For an unrecognised speed, hcnt/lcnt stay 0 and the range checks
	 * below reject the request. */
	if (hcnt > QM_SS_I2C_IC_HCNT_MAX || hcnt < QM_SS_I2C_IC_HCNT_MIN) {
		return -EINVAL;
	}

	if (lcnt > QM_SS_I2C_IC_LCNT_MAX || lcnt < QM_SS_I2C_IC_LCNT_MIN) {
		return -EINVAL;
	}

	/* Increment minimum low count to account for rounding down */
	min_lcnt++;
	if (lcnt < min_lcnt) {
		/* Stretch the low phase to the spec minimum and shorten the
		 * high phase by the same amount to keep the period. */
		lcnt_diff = (min_lcnt - lcnt);
		lcnt += (lcnt_diff);
		hcnt -= (lcnt_diff);
	}

	/* Pack LCNT in [15:0] and HCNT in [31:16]. */
	full_cnt = (lcnt & 0xFFFF) |
		   (hcnt & 0xFFFF) << QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET;

	if (QM_SS_I2C_SPEED_STD == cfg->speed) {
		__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_SS_SCL_CNT);
	} else {
		__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_FS_SCL_CNT);
	}

	return 0;
}
/*
 * Program explicit SCL low/high counts for the given bus speed.
 *
 * Returns 0 on success, -EINVAL if a count is out of hardware range.
 */
int qm_ss_i2c_set_speed(const qm_ss_i2c_t i2c, const qm_ss_i2c_speed_t speed,
			const uint16_t lo_cnt, const uint16_t hi_cnt)
{
	uint32_t full_cnt = 0, controller, con;

	/* Validate before indexing i2c_base[] or reading registers. */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	/* BUG FIX: the lower bound previously tested 'lo_cnt' against the
	 * HCNT minimum instead of 'hi_cnt'. */
	QM_CHECK(hi_cnt < QM_SS_I2C_IC_HCNT_MAX &&
		     hi_cnt > QM_SS_I2C_IC_HCNT_MIN,
		 -EINVAL);
	QM_CHECK(lo_cnt < QM_SS_I2C_IC_LCNT_MAX &&
		     lo_cnt > QM_SS_I2C_IC_LCNT_MIN,
		 -EINVAL);

	controller = i2c_base[i2c];
	con = __builtin_arc_lr(controller + QM_SS_I2C_CON);

	con &= ~QM_SS_I2C_CON_SPEED_MASK;

	/* Pack LCNT in [15:0] and HCNT in [31:16]. */
	full_cnt = (lo_cnt & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK) |
		   (hi_cnt & QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK)
		       << QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET;

	switch (speed) {
	case QM_SS_I2C_SPEED_STD:
		con |= QM_SS_I2C_CON_SPEED_SS;
		__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_SS_SCL_CNT);
		break;

	case QM_SS_I2C_SPEED_FAST:
		con |= QM_SS_I2C_CON_SPEED_FS;
		__builtin_arc_sr(full_cnt, controller + QM_SS_I2C_FS_SCL_CNT);
		break;
	}

	__builtin_arc_sr(con, controller + QM_SS_I2C_CON);

	return 0;
}
/*
 * Report the controller state: busy flag plus any TX-abort source bits.
 *
 * Returns 0 on success, -EINVAL on bad arguments.
 */
int qm_ss_i2c_get_status(const qm_ss_i2c_t i2c,
			 qm_ss_i2c_status_t *const status)
{
	uint32_t controller;

	/* Validate the controller index before indexing i2c_base[] (the
	 * original only checked 'status'). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(status != NULL, -EINVAL);

	controller = i2c_base[i2c];
	*status = 0;

	/* check if slave or master are active */
	if (__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
	    QM_SS_I2C_STATUS_BUSY_MASK) {
		*status |= QM_I2C_BUSY;
	}

	/* check for abort status */
	*status |= (__builtin_arc_lr(controller + QM_SS_I2C_TX_ABRT_SOURCE) &
		    QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK);

	return 0;
}
/*
 * Blocking master write of 'len' bytes to 'slave_addr'.
 *
 * Spins on the TX FIFO; returns 0 on success or -EIO if the controller
 * reports a TX abort. If 'stop' is set, a STOP is issued after the last
 * byte and the controller is disabled.
 */
int qm_ss_i2c_master_write(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
			   const uint8_t *const data, uint32_t len,
			   const bool stop, qm_ss_i2c_status_t *const status)
{
	uint8_t *d = (uint8_t *)data;
	uint32_t controller, con, data_cmd = 0;
	int ret = 0;

	/* Validate arguments before indexing i2c_base[] or reading any
	 * register (the original read registers before checking 'i2c'). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(data != NULL, -EINVAL);
	QM_CHECK(len > 0, -EINVAL);

	controller = i2c_base[i2c];
	con = __builtin_arc_lr(controller + QM_SS_I2C_CON);

	/* write slave address to TAR */
	con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
	con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
	       << QM_SS_I2C_CON_TAR_SAR_OFFSET;
	__builtin_arc_sr(con, controller + QM_SS_I2C_CON);

	/* enable controller */
	controller_enable(i2c);

	while (len--) {

		/* wait if FIFO is full */
		while (!((__builtin_arc_lr(controller + QM_SS_I2C_STATUS)) &
			 QM_SS_I2C_STATUS_TFNF))
			;

		/* write command -IC_DATA_CMD[8] = 0 */
		/* fill IC_DATA_CMD[7:0] with the data */
		data_cmd = *d;
		data_cmd |= QM_SS_I2C_DATA_CMD_PUSH;

		/* send stop after last byte */
		if (len == 0 && stop) {
			data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
		}

		__builtin_arc_sr(data_cmd, controller + QM_SS_I2C_DATA_CMD);
		d++;
	}

	/* this is a blocking call, wait until FIFO is empty or tx abrt
	 * error */
	while (!(__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
		 QM_SS_I2C_STATUS_TFE))
		;

	if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
	     QM_SS_I2C_INTR_STAT_TX_ABRT)) {
		ret = -EIO;
	}

	/* disable controller */
	if (true == stop) {
		controller_disable(i2c);
	}

	if (status != NULL) {
		qm_ss_i2c_get_status(i2c, status);
	}

	/* Clear abort status
	 * The controller flushes/resets/empties
	 * the TX FIFO whenever this bit is set. The TX
	 * FIFO remains in this flushed state until the
	 * register IC_CLR_TX_ABRT is read.
	 */
	__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
			 controller + QM_SS_I2C_INTR_CLR);

	return ret;
}
/*
 * Blocking master read of 'len' bytes from 'slave_addr'.
 *
 * Issues one read command per byte and pops the received data from the
 * FIFO. Returns 0 on success or -EIO on a TX abort. If 'stop' is set, a
 * STOP is issued with the last command and the controller is disabled.
 */
int qm_ss_i2c_master_read(const qm_ss_i2c_t i2c, const uint16_t slave_addr,
			  uint8_t *const data, uint32_t len, const bool stop,
			  qm_ss_i2c_status_t *const status)
{
	uint32_t controller, con,
	    data_cmd = QM_SS_I2C_DATA_CMD_CMD | QM_SS_I2C_DATA_CMD_PUSH;
	uint8_t *d = (uint8_t *)data;
	int ret = 0;

	/* Validate arguments before indexing i2c_base[] or reading any
	 * register (the original read registers before checking 'i2c'). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(data != NULL, -EINVAL);
	QM_CHECK(len > 0, -EINVAL);

	controller = i2c_base[i2c];
	con = __builtin_arc_lr(controller + QM_SS_I2C_CON);

	/* write slave address to TAR */
	con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
	con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
	       << QM_SS_I2C_CON_TAR_SAR_OFFSET;
	__builtin_arc_sr(con, controller + QM_SS_I2C_CON);

	/* enable controller */
	controller_enable(i2c);

	while (len--) {
		if (len == 0 && stop) {
			data_cmd |= QM_SS_I2C_DATA_CMD_STOP;
		}

		/* issue one read command */
		__builtin_arc_sr(data_cmd, controller + QM_SS_I2C_DATA_CMD);

		/* wait if rx fifo is empty, break if tx empty and
		 * error*/
		while (!(__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
			 QM_SS_I2C_STATUS_RFNE)) {

			if (__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
			    QM_SS_I2C_INTR_STAT_TX_ABRT) {
				break;
			}
		}

		if ((__builtin_arc_lr(controller + QM_SS_I2C_INTR_STAT) &
		     QM_SS_I2C_INTR_STAT_TX_ABRT)) {
			ret = -EIO;
			break;
		}

		/* pop the received byte out of the FIFO */
		__builtin_arc_sr(QM_SS_I2C_DATA_CMD_POP,
				 controller + QM_SS_I2C_DATA_CMD);

		/* wait until rx fifo is empty, indicating pop is complete*/
		while ((__builtin_arc_lr(controller + QM_SS_I2C_STATUS) &
			QM_SS_I2C_STATUS_RFNE))
			;

		/* IC_DATA_CMD[7:0] contains received data */
		*d = __builtin_arc_lr(controller + QM_SS_I2C_DATA_CMD);
		d++;
	}

	/* disable controller */
	if (true == stop) {
		controller_disable(i2c);
	}

	if (status != NULL) {
		qm_ss_i2c_get_status(i2c, status);
	}

	/* Clear abort status
	 * The controller flushes/resets/empties
	 * the TX FIFO whenever this bit is set. The TX
	 * FIFO remains in this flushed state until the
	 * register IC_CLR_TX_ABRT is read.
	 */
	__builtin_arc_sr(QM_SS_I2C_INTR_CLR_TX_ABRT,
			 controller + QM_SS_I2C_INTR_CLR);

	return ret;
}
/*
 * Start an interrupt-driven master transfer described by 'xfer'.
 *
 * Saves the transfer state, programs FIFO thresholds, enables the
 * controller and unmasks the relevant interrupts; progress and completion
 * are handled by the ISR. Returns 0 on success, -EINVAL on bad arguments.
 */
int qm_ss_i2c_master_irq_transfer(const qm_ss_i2c_t i2c,
				  const qm_ss_i2c_transfer_t *const xfer,
				  const uint16_t slave_addr)
{
	uint32_t controller, con;

	/* Validate arguments before indexing i2c_base[] or reading any
	 * register (the original read registers before checking 'i2c'). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);
	QM_CHECK(NULL != xfer, -EINVAL);

	controller = i2c_base[i2c];
	con = __builtin_arc_lr(controller + QM_SS_I2C_CON);

	/* write slave address to TAR */
	con &= ~QM_SS_I2C_CON_TAR_SAR_MASK;
	con |= (slave_addr & QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK)
	       << QM_SS_I2C_CON_TAR_SAR_OFFSET;
	__builtin_arc_sr(con, controller + QM_SS_I2C_CON);

	/* reset the per-controller transfer bookkeeping */
	i2c_write_pos[i2c] = 0;
	i2c_read_pos[i2c] = 0;
	i2c_read_buffer_remaining[i2c] = xfer->rx_len;
	memcpy(&i2c_transfer[i2c], xfer, sizeof(i2c_transfer[i2c]));

	/* set threshold */
	if (xfer->rx_len > 0 && xfer->rx_len < (RX_TL + 1)) {
		/* If 'rx_len' is less than the default threshold, we have to
		 * change the threshold value so the 'RX FULL' interrupt is
		 * generated once all data from the transfer is received.
		 */
		__builtin_arc_sr(
		    ((TX_TL << QM_SS_I2C_TL_TX_TL_OFFSET) | (xfer->rx_len - 1)),
		    controller + QM_SS_I2C_TL);
	} else {
		__builtin_arc_sr(((TX_TL << QM_SS_I2C_TL_TX_TL_OFFSET) | RX_TL),
				 controller + QM_SS_I2C_TL);
	}

	/* mask interrupts */
	__builtin_arc_sr(QM_SS_I2C_INTR_MASK_ALL,
			 controller + QM_SS_I2C_INTR_MASK);

	/* enable controller */
	controller_enable(i2c);

	/* unmask interrupts */
	__builtin_arc_sr(
	    (QM_SS_I2C_INTR_MASK_TX_ABRT | QM_SS_I2C_INTR_MASK_TX_EMPTY |
	     QM_SS_I2C_INTR_MASK_TX_OVER | QM_SS_I2C_INTR_MASK_RX_FULL |
	     QM_SS_I2C_INTR_MASK_RX_OVER | QM_SS_I2C_INTR_MASK_RX_UNDER),
	    controller + QM_SS_I2C_INTR_MASK);

	return 0;
}
/*
 * Enable the I2C controller and spin until the hardware confirms the
 * enable took effect. No-op if it is already enabled.
 */
static void controller_enable(const qm_ss_i2c_t i2c)
{
	uint32_t base_addr = i2c_base[i2c];
	uint32_t enabled = __builtin_arc_lr(base_addr + QM_SS_I2C_ENABLE_STATUS) &
			   QM_SS_I2C_ENABLE_STATUS_IC_EN;

	if (enabled) {
		return;
	}

	/* Set the enable bit in CON. */
	QM_SS_REG_AUX_OR((base_addr + QM_SS_I2C_CON), QM_SS_I2C_CON_ENABLE);

	/* Wait until the controller reports itself enabled. */
	do {
		enabled =
		    __builtin_arc_lr(base_addr + QM_SS_I2C_ENABLE_STATUS) &
		    QM_SS_I2C_ENABLE_STATUS_IC_EN;
	} while (!enabled);
}
/*
 * Disable the I2C controller and spin until the hardware confirms the
 * disable took effect. No-op if it is already disabled.
 */
static void controller_disable(const qm_ss_i2c_t i2c)
{
	uint32_t base_addr = i2c_base[i2c];
	uint32_t enabled = __builtin_arc_lr(base_addr + QM_SS_I2C_ENABLE_STATUS) &
			   QM_SS_I2C_ENABLE_STATUS_IC_EN;

	if (!enabled) {
		return;
	}

	/* Clear the enable bit in CON. */
	QM_SS_REG_AUX_NAND((base_addr + QM_SS_I2C_CON), QM_SS_I2C_CON_ENABLE);

	/* Wait until the controller reports itself disabled. */
	do {
		enabled =
		    __builtin_arc_lr(base_addr + QM_SS_I2C_ENABLE_STATUS) &
		    QM_SS_I2C_ENABLE_STATUS_IC_EN;
	} while (enabled);
}
/*
 * Abort an ongoing interrupt-driven transfer by setting the ABORT bit.
 *
 * Returns 0 on success, -EINVAL on a bad controller index.
 */
int qm_ss_i2c_irq_transfer_terminate(const qm_ss_i2c_t i2c)
{
	uint32_t controller;

	/* Validate before indexing i2c_base[] (the original indexed the
	 * array before the check). */
	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);

	controller = i2c_base[i2c];

	/* Abort:
	 * In response to an ABORT, the controller issues a STOP and
	 * flushes the Tx FIFO after completing the current transfer,
	 * then sets the TX_ABORT interrupt after the abort operation.
	 * The ABORT bit is cleared automatically by hardware after the
	 * abort operation.
	 */
	QM_SS_REG_AUX_OR((controller + QM_SS_I2C_CON), QM_SS_I2C_CON_ABORT);

	return 0;
}

View file

@ -0,0 +1,97 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_ss_interrupt.h"
#include "qm_soc_regs.h"
#include "qm_sensor_regs.h"
/* SCSS base addr for Sensor Subsystem interrupt routing, for linear IRQ
 * mapping */
#define SCSS_SS_INT_MASK_BASE (&QM_SCSS_INT->int_ss_adc_err_mask)
#define SCSS_SS_INT_MASK BIT(8) /* Sensor Subsystem interrupt masking */

#if (UNIT_TEST)
/* Under unit test, provide the vector table here so it can be inspected. */
qm_ss_isr_t __ivt_vect_table[QM_SS_INT_VECTOR_NUM];
#else
/* In real builds the interrupt vector table is placed by the linker. */
extern qm_ss_isr_t __ivt_vect_table[];
#endif
/* Globally disable interrupts on the ARC core (CLRI instruction). */
void qm_ss_irq_disable(void)
{
	__builtin_arc_clri();
}
/* Globally enable interrupts on the ARC core (SETI with level 0). */
void qm_ss_irq_enable(void)
{
	__builtin_arc_seti(0);
}
/* Mask a single interrupt line: select it via IRQ_SELECT, then disable it. */
void qm_ss_irq_mask(uint32_t irq)
{
	__builtin_arc_sr(irq, QM_SS_AUX_IRQ_SELECT);
	__builtin_arc_sr(QM_SS_INT_DISABLE, QM_SS_AUX_IRQ_ENABLE);
}
/* Unmask a single interrupt line: select it via IRQ_SELECT, then enable it. */
void qm_ss_irq_unmask(uint32_t irq)
{
	__builtin_arc_sr(irq, QM_SS_AUX_IRQ_SELECT);
	__builtin_arc_sr(QM_SS_INT_ENABLE, QM_SS_AUX_IRQ_ENABLE);
}
/*
 * Install 'isr' at interrupt vector 'vector' in the vector table, keeping
 * the instruction cache coherent with the update.
 */
void qm_ss_int_vector_request(uint32_t vector, qm_ss_isr_t isr)
{
	/* Invalidate the I-cache line which contains the irq vector. This
	 * will bypass I-Cach and set vector with the good isr. */
	__builtin_arc_sr((uint32_t)&__ivt_vect_table[0] + (vector * 4),
			 QM_SS_AUX_IC_IVIL);
	/* All SR accesses to the IC_IVIL register must be followed by three
	 * NOP instructions, see chapter 3.3.59 in the datasheet
	 * "ARC_V2_ProgrammersReference.pdf" */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	__ivt_vect_table[vector] = isr;
}
/*
 * Hook 'isr' to peripheral interrupt 'irq' and route that line to the
 * Sensor Subsystem. The line is masked for the duration of the set-up.
 */
void qm_ss_irq_request(uint32_t irq, qm_ss_isr_t isr)
{
	uint32_t *scss_intmask;
	/* Peripheral IRQ vectors follow the exception and timer vectors. */
	uint32_t vector = irq + (QM_SS_EXCEPTION_NUM + QM_SS_INT_TIMER_NUM);

	/* Guarding the IRQ set-up */
	qm_ss_irq_mask(vector);

	qm_ss_int_vector_request(vector, isr);

	/* Route peripheral interrupt to Sensor Subsystem */
	scss_intmask = (uint32_t *)SCSS_SS_INT_MASK_BASE + irq;
	*scss_intmask &= ~SCSS_SS_INT_MASK;

	qm_ss_irq_unmask(vector);
}

View file

@ -0,0 +1,400 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_ss_spi.h"
#define FIFO_SIZE (8)      /* Maximum size of RX or TX FIFO */
#define FIFO_RX_W_MARK (6) /* Interrupt mark to read RX FIFO */
#define FIFO_TX_W_MARK (3) /* Interrupt mark to write TX FIFO */

/* Bytes per frame: 1 when the data frame size (DFS) is <= 8 bits, else 2. */
#define BYTES_PER_FRAME(reg_data)                                             \
	(((reg_data & QM_SS_SPI_CTRL_DFS_MASK) >> 3) + 1)

/* AUX register base address of each SPI instance. */
static uint32_t base[QM_SS_SPI_NUM] = {QM_SS_SPI_0_BASE, QM_SS_SPI_1_BASE};

/* Per-controller state for the interrupt-driven transfer in flight. */
static const qm_ss_spi_async_transfer_t *transfer[QM_SS_SPI_NUM];
static uint32_t rx_c[QM_SS_SPI_NUM]; /* frames still to receive */
static uint32_t tx_c[QM_SS_SPI_NUM]; /* frames still to send */
static uint8_t *rx_p[QM_SS_SPI_NUM]; /* current RX buffer position */
static uint8_t *tx_p[QM_SS_SPI_NUM]; /* current TX buffer position */
static uint16_t dummy_frame; /* sent on RX-only transfers to clock data in */
/* Private Functions */
static void spi_disable(const qm_ss_spi_t spi)
{
/* Disable SPI device */
QM_SS_REG_AUX_NAND(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);
/* MASK all interrupts. */
__builtin_arc_sr(0, base[spi] + QM_SS_SPI_INTR_MASK);
/* Clear all interrupts */
__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_CLR_INTR);
}
/*
 * Push one frame (1 or 2 bytes wide) from 'data' into the TX FIFO by
 * writing the DR register with the write strobe set.
 */
static __inline__ void fifo_write(const qm_ss_spi_t spi, void *data,
				  uint8_t size)
{
	uint32_t frame = (size == 1) ? *(uint8_t *)data : *(uint16_t *)data;

	__builtin_arc_sr(frame | QM_SS_SPI_DR_W_MASK,
			 base[spi] + QM_SS_SPI_DR);
}
/*
 * Pop one frame (1 or 2 bytes wide) from the RX FIFO into 'data': write
 * the read strobe to DR, then read the frame back from DR.
 */
static __inline__ void fifo_read(const qm_ss_spi_t spi, void *data,
				 uint8_t size)
{
	uint32_t dr_addr = base[spi] + QM_SS_SPI_DR;

	__builtin_arc_sr(QM_SS_SPI_DR_R_MASK, dr_addr);

	if (size == 1) {
		*(uint8_t *)data = __builtin_arc_lr(dr_addr);
	} else {
		*(uint16_t *)data = __builtin_arc_lr(dr_addr);
	}
}
/* Public Functions */
/*
 * Configure frame size, transfer mode, bus mode and clock divider for the
 * given SPI controller. Must be called with the controller disabled.
 *
 * Returns 0 on success, -EINVAL on bad arguments.
 */
int qm_ss_spi_set_config(const qm_ss_spi_t spi,
			 const qm_ss_spi_config_t *const cfg)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(cfg, -EINVAL);

	/* Configuration can be changed only when SPI is disabled */
	/* NOTE: check if QM_ASSERT is the right thing to do here */
	QM_ASSERT((__builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) &
		   QM_SS_SPI_SPIEN_EN) == 0);

	/* BUG FIX: read CTRL from the selected controller's base address;
	 * the original always read from QM_SS_SPI_0_BASE, so configuring
	 * SPI 1 used SPI 0's clock-enable bit. */
	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	/* Preserve only the clock-enable bit; rebuild everything else. */
	ctrl &= QM_SS_SPI_CTRL_CLK_ENA;
	ctrl |= cfg->frame_size << QM_SS_SPI_CTRL_DFS_OFFS;
	ctrl |= cfg->transfer_mode << QM_SS_SPI_CTRL_TMOD_OFFS;
	ctrl |= cfg->bus_mode << QM_SS_SPI_CTRL_BMOD_OFFS;
	__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);

	__builtin_arc_sr(cfg->clk_divider, base[spi] + QM_SS_SPI_TIMING);

	return 0;
}
/*
 * Select the active slave line(s) for the next transfer. Must not be
 * called while the controller is busy.
 */
int qm_ss_spi_slave_select(const qm_ss_spi_t spi,
			   const qm_ss_spi_slave_select_t ss)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);

	/* Check if the device reports as busy. */
	/* NOTE: check if QM_ASSERT is the right thing to do here */
	QM_ASSERT(
	    !(__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY));

	/* Replace the slave-enable field, keeping the other SPIEN bits. */
	uint32_t spien = __builtin_arc_lr(base[spi] + QM_SS_SPI_SPIEN) &
			 ~QM_SS_SPI_SPIEN_SER_MASK;
	spien |= (ss << QM_SS_SPI_SPIEN_SER_OFFS);
	__builtin_arc_sr(spien, base[spi] + QM_SS_SPI_SPIEN);

	return 0;
}
/*
 * Report whether the SPI controller is busy or idle via '*status'.
 */
int qm_ss_spi_get_status(const qm_ss_spi_t spi,
			 qm_ss_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(status, -EINVAL);

	uint32_t sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);

	*status = (sr & QM_SS_SPI_SR_BUSY) ? QM_SS_SPI_BUSY : QM_SS_SPI_IDLE;

	return 0;
}
/*
 * Blocking (polled) SPI transfer. Validates the buffer lengths against
 * the configured transfer mode, then pumps both FIFOs until all frames
 * are exchanged. Returns 0 on success, -EIO on RX FIFO overflow.
 */
int qm_ss_spi_transfer(const qm_ss_spi_t spi,
		       const qm_ss_spi_transfer_t *const xfer,
		       qm_ss_spi_status_t *const status)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);

	/* The tx/rx lengths must be consistent with the transfer mode. */
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1, -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_EEPROM_READ ? (xfer->rx_len > 0) : 1,
		 -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->rx_len > 0) : 1, -EINVAL);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1, -EINVAL);

	uint32_t tx_cnt = xfer->tx_len;
	uint32_t rx_cnt = xfer->rx_len;
	uint8_t *rx_buffer = xfer->rx;
	uint8_t *tx_buffer = xfer->tx;
	int ret = 0;

	/* Disable all SPI interrupts */
	__builtin_arc_sr(0, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Set NDF (Number of Data Frames) in RX or EEPROM Read mode. (-1) */
	if (tmode == QM_SS_SPI_TMOD_RX || tmode == QM_SS_SPI_TMOD_EEPROM_READ) {
		ctrl &= ~QM_SS_SPI_CTRL_NDF_MASK;
		ctrl |= ((xfer->rx_len - 1) << QM_SS_SPI_CTRL_NDF_OFFS) &
			QM_SS_SPI_CTRL_NDF_MASK;
		__builtin_arc_sr(ctrl, base[spi] + QM_SS_SPI_CTRL);
	}

	/* RX only transfers need a dummy frame to be sent. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		tx_buffer = (uint8_t *)&dummy_frame;
		tx_cnt = 1;
	}

	/* Calculate number of bytes per frame (1 or 2)*/
	uint8_t bytes = BYTES_PER_FRAME(ctrl);

	/* Enable SPI device */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	while (tx_cnt || rx_cnt) {
		uint32_t sr = __builtin_arc_lr(base[spi] + QM_SS_SPI_SR);
		/* Break and report error if RX FIFO has overflown */
		if (__builtin_arc_lr(base[spi] + QM_SS_SPI_INTR_STAT) &
		    QM_SS_SPI_INTR_RXOI) {
			ret = -EIO;
			if (status) {
				/* NOTE(review): this ORs into *status without
				 * initializing it first — presumably callers
				 * pass a zeroed status; confirm. */
				*status |= QM_SS_SPI_RX_OVERFLOW;
			}
			break;
		}
		/* Copy data to buffer as long RX-FIFO is not empty */
		if (sr & QM_SS_SPI_SR_RFNE && rx_cnt) {
			fifo_read(spi, rx_buffer, bytes);
			rx_buffer += bytes;
			rx_cnt--;
		}
		/* Copy data from buffer as long TX-FIFO is not full. */
		if (sr & QM_SS_SPI_SR_TFNF && tx_cnt) {
			fifo_write(spi, tx_buffer, bytes);
			tx_buffer += bytes;
			tx_cnt--;
		}
	}

	/* Wait for last byte transfered */
	while (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)
		;

	spi_disable(spi);
	return ret;
}
/* Interrupt related functions. */
/*
 * Start an interrupt-driven SPI transfer. Saves the transfer state into
 * the per-controller module globals, programs the RX FIFO threshold,
 * unmasks all interrupts and enables the device; the ISR handlers move
 * the data and invoke the user callback on completion.
 */
int qm_ss_spi_irq_transfer(const qm_ss_spi_t spi,
			   const qm_ss_spi_async_transfer_t *const xfer)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	QM_CHECK(xfer, -EINVAL);

	/* Load and save initial control register */
	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);
	QM_CHECK(tmode == QM_SS_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len)
					       : 1,
		 -EINVAL);

	/* Record the transfer state consumed by the ISR handlers. */
	transfer[spi] = xfer;
	tx_c[spi] = xfer->tx_len;
	rx_c[spi] = xfer->rx_len;
	tx_p[spi] = xfer->tx;
	rx_p[spi] = xfer->rx;

	/* RX only transfers need a dummy frame byte to be sent. */
	if (tmode == QM_SS_SPI_TMOD_RX) {
		tx_p[spi] = (uint8_t *)&dummy_frame;
		tx_c[spi] = 1;
	}

	/* RX threshold: min(watermark, rx_len) - 1 frames.
	 * NOTE(review): when rx_len == 0 (TX-only mode) this computes
	 * (0 - 1) before masking — presumably harmless because the RX FULL
	 * interrupt never fires in TX mode; confirm. */
	uint32_t ftlr =
	    (((FIFO_RX_W_MARK < xfer->rx_len ? FIFO_RX_W_MARK : xfer->rx_len) -
	      1)
	     << QM_SS_SPI_FTLR_RFT_OFFS) &
	    QM_SS_SPI_FTLR_RFT_MASK;
	__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);

	/* Unmask all interrupts */
	__builtin_arc_sr(QM_SS_SPI_INTR_ALL, base[spi] + QM_SS_SPI_INTR_MASK);

	/* Enable SPI device */
	QM_SS_REG_AUX_OR(base[spi] + QM_SS_SPI_SPIEN, QM_SS_SPI_SPIEN_EN);

	return 0;
}
/*
 * Abort the interrupt-driven transfer in progress: disable the controller
 * and notify the user callback with -ECANCELED and the number of frames
 * already moved.
 *
 * NOTE(review): assumes qm_ss_spi_irq_transfer() was called first —
 * transfer[spi] is dereferenced without a NULL check; confirm callers
 * guarantee this.
 */
int qm_ss_spi_transfer_terminate(const qm_ss_spi_t spi)
{
	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);
	spi_disable(spi);

	if (transfer[spi]->callback) {
		uint32_t len = 0;
		uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
		uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
					  QM_SS_SPI_CTRL_TMOD_OFFS);
		/* Report progress in the direction the mode transfers. */
		if (tmode == QM_SS_SPI_TMOD_TX ||
		    tmode == QM_SS_SPI_TMOD_TX_RX) {
			len = transfer[spi]->tx_len - tx_c[spi];
		} else {
			len = transfer[spi]->rx_len - rx_c[spi];
		}

		/*
		 * NOTE: change this to return controller-specific code
		 * 'user aborted'.
		 */
		transfer[spi]->callback(transfer[spi]->data, -ECANCELED,
					QM_SS_SPI_IDLE, (uint16_t)len);
	}

	return 0;
}
/*
 * Error-interrupt handler: shut the controller down and, on RX overflow,
 * report -EIO to the user callback with the frames received so far.
 */
static void handle_spi_err_interrupt(const qm_ss_spi_t spi)
{
	uint32_t intr_stat = __builtin_arc_lr(base[spi] + QM_SS_SPI_INTR_STAT);
	spi_disable(spi);

	/* TX overflow / RX full are not expected to reach this handler. */
	QM_ASSERT((intr_stat &
		   (QM_SS_SPI_INTR_STAT_TXOI | QM_SS_SPI_INTR_STAT_RXFI)) == 0);

	if ((intr_stat & QM_SS_SPI_INTR_RXOI) && transfer[spi]->callback) {
		transfer[spi]->callback(transfer[spi]->data, -EIO,
					QM_SS_SPI_RX_OVERFLOW,
					transfer[spi]->rx_len - rx_c[spi]);
	}
}
/*
 * TX-empty interrupt handler: refill the TX FIFO from the transfer
 * buffer. When all frames have been queued, either finish a TX-only
 * transfer (disable + callback) or mask TX-empty and let the RX side
 * complete the transfer.
 */
static void handle_spi_tx_interrupt(const qm_ss_spi_t spi)
{
	/* Clear Transmit Fifo Emtpy interrupt */
	__builtin_arc_sr(QM_SS_SPI_INTR_TXEI, base[spi] + QM_SS_SPI_CLR_INTR);

	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	/* Calculate number of bytes per frame (1 or 2)*/
	uint8_t bytes = BYTES_PER_FRAME(ctrl);
	uint8_t tmode = (uint8_t)((ctrl & QM_SS_SPI_CTRL_TMOD_MASK) >>
				  QM_SS_SPI_CTRL_TMOD_OFFS);

	/* All frames queued and the shift register has drained. */
	if (tx_c[spi] == 0 &&
	    !(__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_BUSY)) {
		if (tmode == QM_SS_SPI_TMOD_TX) {
			spi_disable(spi);
			if (transfer[spi]->callback) {
				transfer[spi]->callback(transfer[spi]->data, 0,
							QM_SS_SPI_IDLE,
							transfer[spi]->tx_len);
			}
		} else {
			/* RX still pending: stop TX-empty interrupts only. */
			QM_SS_REG_AUX_NAND(base[spi] + QM_SS_SPI_INTR_MASK,
					   QM_SS_SPI_INTR_TXEI);
		}
		return;
	}

	/* Make sure RX fifo does not overflow */
	uint32_t rxflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_RXFLR);
	uint32_t txflr = __builtin_arc_lr(base[spi] + QM_SS_SPI_TXFLR);
	int32_t cnt = FIFO_SIZE - rxflr - txflr - 1;

	while (tx_c[spi] && cnt > 0) {
		fifo_write(spi, tx_p[spi], bytes);
		tx_p[spi] += bytes;
		tx_c[spi]--;
		cnt--;
	}
}
/*
 * RX-full interrupt handler: drain the RX FIFO into the transfer buffer.
 * If frames remain, lower the FIFO threshold so the final partial batch
 * still raises an interrupt; otherwise finish the transfer (disable +
 * callback).
 */
static void handle_spi_rx_interrupt(const qm_ss_spi_t spi)
{
	/* Clear RX-FIFO FULL interrupt */
	__builtin_arc_sr(QM_SS_SPI_INTR_RXFI, base[spi] + QM_SS_SPI_CLR_INTR);

	uint32_t ctrl = __builtin_arc_lr(base[spi] + QM_SS_SPI_CTRL);
	/* Calculate number of bytes per frame (1 or 2)*/
	uint8_t bytes = BYTES_PER_FRAME(ctrl);

	/* Drain while the FIFO has data and frames are still expected. */
	while (__builtin_arc_lr(base[spi] + QM_SS_SPI_SR) & QM_SS_SPI_SR_RFNE &&
	       rx_c[spi]) {
		fifo_read(spi, rx_p[spi], bytes);
		rx_p[spi] += bytes;
		rx_c[spi]--;
	}

	/* Set new FIFO threshold or complete transfer */
	uint32_t new_irq_level =
	    (FIFO_RX_W_MARK < rx_c[spi] ? FIFO_RX_W_MARK : rx_c[spi]);
	if (rx_c[spi]) {
		/* Threshold is 0-based: interrupt at (level - 1) + 1 frames. */
		new_irq_level--;
		uint32_t ftlr = __builtin_arc_lr(base[spi] + QM_SS_SPI_FTLR);
		ftlr &= ~QM_SS_SPI_FTLR_RFT_MASK;
		ftlr |= (new_irq_level << QM_SS_SPI_FTLR_RFT_OFFS);
		__builtin_arc_sr(ftlr, base[spi] + QM_SS_SPI_FTLR);
	} else {
		spi_disable(spi);
		if (transfer[spi]->callback) {
			transfer[spi]->callback(transfer[spi]->data, 0,
						QM_SS_SPI_IDLE,
						transfer[spi]->rx_len);
		}
	}
}
/* Thin ISR entry points: one per controller per interrupt source, each
 * delegating to the shared handler for that source. */

QM_ISR_DECLARE(qm_ss_spi_0_err_isr)
{
	handle_spi_err_interrupt(QM_SS_SPI_0);
}

QM_ISR_DECLARE(qm_ss_spi_1_err_isr)
{
	handle_spi_err_interrupt(QM_SS_SPI_1);
}

QM_ISR_DECLARE(qm_ss_spi_0_rx_isr)
{
	handle_spi_rx_interrupt(QM_SS_SPI_0);
}

QM_ISR_DECLARE(qm_ss_spi_1_rx_isr)
{
	handle_spi_rx_interrupt(QM_SS_SPI_1);
}

QM_ISR_DECLARE(qm_ss_spi_0_tx_isr)
{
	handle_spi_tx_interrupt(QM_SS_SPI_0);
}

QM_ISR_DECLARE(qm_ss_spi_1_tx_isr)
{
	handle_spi_tx_interrupt(QM_SS_SPI_1);
}

View file

@ -0,0 +1,92 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_ss_timer.h"
/* Per-timer user callback invoked from the timer ISR. */
static void (*callback[QM_SS_TIMER_NUM])(void *data);
/* Opaque user data handed back to the callback. */
static void *callback_data[QM_SS_TIMER_NUM];
/* AUX register base address of each timer instance. */
static uint32_t qm_ss_timer_base[QM_SS_TIMER_NUM] = {QM_SS_TIMER_0_BASE};
/*
 * Shared timer ISR body: invoke the user callback (if any), then clear
 * the interrupt-pending bit in the timer control register.
 */
static __inline__ void qm_ss_timer_isr(qm_ss_timer_t timer)
{
	uint32_t ctrl = 0;

	if (callback[timer]) {
		callback[timer](callback_data[timer]);
	}

	/* Acknowledge the interrupt by clearing the pending bit. */
	ctrl = __builtin_arc_lr(qm_ss_timer_base[timer] + QM_SS_TIMER_CONTROL);
	ctrl &= ~BIT(QM_SS_TIMER_CONTROL_INT_PENDING_OFFSET);
	__builtin_arc_sr(ctrl, qm_ss_timer_base[timer] + QM_SS_TIMER_CONTROL);
}
/* ISR entry point for timer 0: delegate to the shared handler. */
QM_ISR_DECLARE(qm_ss_timer_isr_0)
{
	qm_ss_timer_isr(QM_SS_TIMER_0);
}
/*
 * Configure a Sensor Subsystem timer: control flags, count limit and the
 * user callback invoked from the ISR.
 *
 * Returns 0 on success, -EINVAL on bad arguments.
 */
int qm_ss_timer_set_config(const qm_ss_timer_t timer,
			   const qm_ss_timer_config_t *const cfg)
{
	uint32_t control_word;

	QM_CHECK(cfg != NULL, -EINVAL);
	QM_CHECK(timer < QM_SS_TIMER_NUM, -EINVAL);

	/* Assemble the control word from the configuration flags. */
	control_word =
	    cfg->watchdog_mode << QM_SS_TIMER_CONTROL_WATCHDOG_OFFSET;
	control_word |=
	    cfg->inc_run_only << QM_SS_TIMER_CONTROL_NON_HALTED_OFFSET;
	control_word |= cfg->int_en << QM_SS_TIMER_CONTROL_INT_EN_OFFSET;

	__builtin_arc_sr(control_word,
			 qm_ss_timer_base[timer] + QM_SS_TIMER_CONTROL);
	__builtin_arc_sr(cfg->count,
			 qm_ss_timer_base[timer] + QM_SS_TIMER_LIMIT);

	/* Remember the callback for the ISR. */
	callback[timer] = cfg->callback;
	callback_data[timer] = cfg->callback_data;

	return 0;
}
/* Set the timer's current count register. Returns 0, -EINVAL on bad timer. */
int qm_ss_timer_set(const qm_ss_timer_t timer, const uint32_t count)
{
	QM_CHECK(timer < QM_SS_TIMER_NUM, -EINVAL);

	__builtin_arc_sr(count, qm_ss_timer_base[timer] + QM_SS_TIMER_COUNT);

	return 0;
}
/* Read the timer's current count into '*count'. Returns 0, -EINVAL on
 * bad arguments. */
int qm_ss_timer_get(const qm_ss_timer_t timer, uint32_t *const count)
{
	QM_CHECK(timer < QM_SS_TIMER_NUM, -EINVAL);
	QM_CHECK(count != NULL, -EINVAL);

	*count = __builtin_arc_lr(qm_ss_timer_base[timer] + QM_SS_TIMER_COUNT);

	return 0;
}

View file

@ -0,0 +1,118 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_common.h"
#include "ss_clk.h"
/*
 * Enable the clock for a Sensor Subsystem GPIO controller and set the
 * synchronisation level.
 */
int ss_clk_gpio_enable(const qm_ss_gpio_t gpio)
{
	int base_addr;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);

	if (gpio == QM_SS_GPIO_0) {
		base_addr = QM_SS_GPIO_0_BASE;
	} else {
		base_addr = QM_SS_GPIO_1_BASE;
	}

	__builtin_arc_sr(QM_SS_GPIO_LS_SYNC_CLK_EN |
			     QM_SS_GPIO_LS_SYNC_SYNC_LVL,
			 base_addr + QM_SS_GPIO_LS_SYNC);

	return 0;
}
/*
 * Disable the clock for a Sensor Subsystem GPIO controller by clearing
 * its LS_SYNC register.
 */
int ss_clk_gpio_disable(const qm_ss_gpio_t gpio)
{
	int base_addr;

	QM_CHECK(gpio < QM_SS_GPIO_NUM, -EINVAL);

	if (gpio == QM_SS_GPIO_0) {
		base_addr = QM_SS_GPIO_0_BASE;
	} else {
		base_addr = QM_SS_GPIO_1_BASE;
	}

	__builtin_arc_sr(0, base_addr + QM_SS_GPIO_LS_SYNC);

	return 0;
}
/* Enable the clock for a Sensor Subsystem SPI controller. */
int ss_clk_spi_enable(const qm_ss_spi_t spi)
{
	int base_addr;

	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);

	base_addr = (spi == QM_SS_SPI_0) ? QM_SS_SPI_0_BASE : QM_SS_SPI_1_BASE;

	/* Set the clock-enable bit in CTRL. */
	QM_SS_REG_AUX_OR(base_addr + QM_SS_SPI_CTRL, QM_SS_SPI_CTRL_CLK_ENA);

	return 0;
}
/* Disable the clock for a Sensor Subsystem SPI controller. */
int ss_clk_spi_disable(const qm_ss_spi_t spi)
{
	int base_addr;

	QM_CHECK(spi < QM_SS_SPI_NUM, -EINVAL);

	base_addr = (spi == QM_SS_SPI_0) ? QM_SS_SPI_0_BASE : QM_SS_SPI_1_BASE;

	/* Clear the clock-enable bit in CTRL. */
	QM_SS_REG_AUX_NAND(base_addr + QM_SS_SPI_CTRL, QM_SS_SPI_CTRL_CLK_ENA);

	return 0;
}
/* Enable the clock for a Sensor Subsystem I2C controller. */
int ss_clk_i2c_enable(const qm_ss_i2c_t i2c)
{
	int base_addr;

	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);

	base_addr = (i2c == QM_SS_I2C_0) ? QM_SS_I2C_0_BASE : QM_SS_I2C_1_BASE;

	/* Set the clock-enable bit in CON. */
	QM_SS_REG_AUX_OR(base_addr + QM_SS_I2C_CON, QM_SS_I2C_CON_CLK_ENA);

	return 0;
}
/* Disable the clock for a Sensor Subsystem I2C controller. */
int ss_clk_i2c_disable(const qm_ss_i2c_t i2c)
{
	int base_addr;

	QM_CHECK(i2c < QM_SS_I2C_NUM, -EINVAL);

	base_addr = (i2c == QM_SS_I2C_0) ? QM_SS_I2C_0_BASE : QM_SS_I2C_1_BASE;

	/* Clear the clock-enable bit in CON. */
	QM_SS_REG_AUX_NAND(base_addr + QM_SS_I2C_CON, QM_SS_I2C_CON_CLK_ENA);

	return 0;
}
/**
 * Enable the Sensor Subsystem ADC clock.
 *
 * Sets the clock-enable bit of the ADC control auxiliary register.
 *
 * @return Always 0.
 */
int ss_clk_adc_enable(void)
{
	/* Ungate the ADC clock via the control register's enable bit. */
	QM_SS_REG_AUX_OR(QM_SS_ADC_BASE + QM_SS_ADC_CTRL,
			 QM_SS_ADC_CTRL_CLK_ENA);
	return 0;
}
/**
 * Disable the Sensor Subsystem ADC clock.
 *
 * Clears the clock-enable bit of the ADC control auxiliary register.
 *
 * @return Always 0.
 */
int ss_clk_adc_disable(void)
{
	/* Gate the ADC clock via the control register's enable bit. */
	QM_SS_REG_AUX_NAND(QM_SS_ADC_BASE + QM_SS_ADC_CTRL,
			   QM_SS_ADC_CTRL_CLK_ENA);
	return 0;
}
/**
 * Set the Sensor Subsystem ADC clock divisor.
 *
 * Performs a read-modify-write of the DIVSEQSTAT auxiliary register,
 * replacing only the clock-ratio field.
 *
 * @param div Divisor value to program.
 * @return 0 on success, -EINVAL if div is out of range (via QM_CHECK).
 */
int ss_clk_adc_set_div(const uint32_t div)
{
	/*
	 * The maximum divisor scales with the system clock speed. Clock
	 * speeds below 1 MHz will not work properly.
	 */
	QM_CHECK(div <= QM_SS_ADC_DIV_MAX * clk_sys_get_ticks_per_us(),
		 -EINVAL);

	/* Read-modify-write: clear the clock-ratio field, then OR in the
	 * requested divisor. */
	uint32_t divseqstat =
	    __builtin_arc_lr(QM_SS_ADC_BASE + QM_SS_ADC_DIVSEQSTAT);
	divseqstat &= ~(QM_SS_ADC_DIVSEQSTAT_CLK_RATIO_MASK);
	divseqstat |= div;
	__builtin_arc_sr(divseqstat, QM_SS_ADC_BASE + QM_SS_ADC_DIVSEQSTAT);

	return 0;
}

View file

@ -0,0 +1,88 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "ss_power_states.h"
/* Sensor Subsystem sleep operand definition.
* Only a subset applies as internal sensor RTC
* is not available.
*
* OP | Core | Timers | RTC
* 000 | 0 | 1 | 1 <-- used for SS1
* 001 | 0 | 0 | 1
* 010 | 0 | 1 | 0
* 011 | 0 | 0 | 0 <-- used for SS2
* 100 | 0 | 0 | 0
* 101 | 0 | 0 | 0
* 110 | 0 | 0 | 0
* 111 | 0 | 0 | 0
*
* sleep opcode argument:
* - [7:5] : Sleep Operand
* - [4] : Interrupt enable
* - [3:0] : Interrupt threshold value
*/
#define QM_SS_SLEEP_MODE_CORE_OFF (0x0)
#define QM_SS_SLEEP_MODE_CORE_OFF_TIMER_OFF (0x20)
#define QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF (0x60)
/* Enter SS1 :
* SLEEP + sleep operand
* __builtin_arc_sleep is not used here as it does not propagate sleep operand.
*/
/**
 * Put the Sensor Subsystem core into the SS1 sleep state.
 *
 * Issues the ARC SLEEP instruction directly via inline asm because
 * __builtin_arc_sleep does not propagate the sleep operand (see the
 * comment preceding this function). The "i" constraint requires the
 * operand to be a compile-time constant, which is why each mode has its
 * own asm statement instead of a single one with a variable operand.
 *
 * @param mode Selects whether the timers stay on during SS1; any value
 *             other than SS_POWER_CPU_SS1_TIMER_OFF falls through to the
 *             timers-on encoding.
 */
void ss_power_cpu_ss1(const ss_power_cpu_ss1_mode_t mode)
{
	/* Enter SS1 */
	switch (mode) {
	case SS_POWER_CPU_SS1_TIMER_OFF:
		/* Sleep operand 0x20: core off, timers off, RTC on. */
		__asm__ __volatile__(
		    "sleep %0"
		    :
		    : "i"(QM_SS_SLEEP_MODE_CORE_OFF_TIMER_OFF));
		break;
	case SS_POWER_CPU_SS1_TIMER_ON:
	default:
		/* Sleep operand 0x0: core off, timers and RTC kept on. */
		__asm__ __volatile__("sleep %0"
				     :
				     : "i"(QM_SS_SLEEP_MODE_CORE_OFF));
		break;
	}
}
/* Enter SS2 :
* SLEEP + sleep operand
* __builtin_arc_sleep is not used here as it does not propagate sleep operand.
*/
/**
 * Put the Sensor Subsystem core into the SS2 sleep state.
 *
 * Issues the ARC SLEEP instruction with operand 0x60 (core, timers and
 * RTC all off) via inline asm; __builtin_arc_sleep is not used because it
 * does not propagate the sleep operand, and the "i" constraint requires a
 * compile-time constant operand.
 */
void ss_power_cpu_ss2(void)
{
	/* Enter SS2 */
	__asm__ __volatile__("sleep %0"
			     :
			     : "i"(QM_SS_SLEEP_MODE_CORE_TIMERS_RTC_OFF));
}

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -37,12 +37,18 @@
#include <stdint.h>
#include <stdbool.h>
#include "qm_rc.h"
#include <errno.h>
#define QM_R volatile const
#define QM_W volatile
#define QM_RW volatile
/* __attribute__((interrupt)) API requires that the interrupt handlers
* take an interrupt_frame parameter, but it is still undefined, so add
* an empty definition.
*/
struct interrupt_frame;
#ifndef NULL
#define NULL ((void *)0)
#endif
@ -139,6 +145,15 @@ void stdout_uart_setup(uint32_t baud_divisors);
#define STDOUT_UART (QM_UART_1)
#endif
/*
 * Stdout UART initialization is enabled by default. Use this switch if you wish
* to disable it (e.g. if the UART is already initialized by an application
* running on the other core).
*/
#ifndef STDOUT_UART_INIT_DISABLE
#define STDOUT_UART_INIT (1)
#endif
/**
* Select assert action (default: put the IA core into HLT state)
*/
@ -186,7 +201,7 @@ void stdout_uart_setup(uint32_t baud_divisors);
/* Bitwise operation helpers */
#ifndef BIT
#define BIT(x) (1U << x)
#define BIT(x) (1U << (x))
#endif
/* Set all bits */
@ -194,6 +209,52 @@ void stdout_uart_setup(uint32_t baud_divisors);
#define SET_ALL_BITS (-1)
#endif
/*
* ISR declaration.
*
* The x86 'interrupt' attribute requires an interrupt_frame parameter.
* To keep consistency between different cores and compiler capabilities, we add
* the interrupt_frame parameter to all ISR handlers. When not needed, the value
* passed is a dummy one (NULL).
*/
#if (UNIT_TEST)
#define QM_ISR_DECLARE(handler) \
void handler(__attribute__( \
(unused)) struct interrupt_frame *__interrupt_frame__)
#else /* !UNIT_TEST */
#if (QM_SENSOR) && !(ISR_HANDLED)
/*
* Sensor Subsystem 'interrupt' attribute.
*/
#define QM_ISR_DECLARE(handler) \
__attribute__((interrupt("ilink"))) void handler(__attribute__( \
(unused)) struct interrupt_frame *__interrupt_frame__)
#elif(ISR_HANDLED)
/*
* Allow users to define their own ISR management. This includes optimisations
* and clearing EOI registers.
*/
#define QM_ISR_DECLARE(handler) void handler(__attribute__((unused)) void *data)
#elif(__iamcu__)
/*
* Lakemont with compiler supporting 'interrupt' attribute.
* We assume that if the compiler supports the IAMCU ABI it also supports the
* 'interrupt' attribute.
*/
#define QM_ISR_DECLARE(handler) \
__attribute__((interrupt)) void handler(__attribute__( \
(unused)) struct interrupt_frame *__interrupt_frame__)
#else
/*
* Lakemont with compiler not supporting the 'interrupt' attribute.
*/
#define QM_ISR_DECLARE(handler) \
void handler(__attribute__( \
(unused)) struct interrupt_frame *__interrupt_frame__)
#endif
#endif /* UNIT_TEST */
/**
* Helper to convert a macro parameter into its literal text.
*/

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,32 +27,34 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__
#include "qm_soc_regs.h"
/*
* Copyright (c) 1982, 1986 Regents of the University of California.
* All rights reserved. The Berkeley software License Agreement
* specifies the terms and conditions for redistribution.
*
* @(#)errno.h 7.1 (Berkeley) 6/4/86
* Single, shared spinlock which can be used for synchronization between the
* Lakemont and ARC cores.
* The Spinlock lock size and position in RAM must be same on both cores.
*/
#if (QUARK_SE)
#ifndef __QM_RC_H__
#define __QM_RC_H__
typedef struct {
volatile char flag[2];
volatile char turn;
} spinlock_t;
/* Return codes */
typedef enum {
QM_RC_OK = 0,
QM_RC_ERROR, /* Unknown/unclassified error */
QM_RC_EINVAL = 22, /* Invalid argument, matches Berkeley equivalent */
/* UART */
QM_RC_UART_RX_OE = 0x80, /* Receiver overrun */
QM_RC_UART_RX_FE, /* Framing error */
QM_RC_UART_RX_PE, /* Parity error */
QM_RC_UART_RX_BI, /* Break interrupt */
/* I2C */
QM_RC_I2C_ARB_LOST = 0x100, /* Arbitration lost */
QM_RC_I2C_NAK, /* Missing acknowledge */
/* SPI */
QM_RC_SPI_RX_OE = 0x120, /* RX Fifo Overflow error */
} qm_rc_t;
extern spinlock_t __esram_lock_start;
void spinlock_lock(spinlock_t *lock);
void spinlock_unlock(spinlock_t *lock);
#endif /* __QM_RC_H__ */
#define QM_SPINLOCK_LOCK() spinlock_lock(&__esram_lock_start)
#define QM_SPINLOCK_UNLOCK() spinlock_unlock(&__esram_lock_start)
#else
#define QM_SPINLOCK_LOCK()
#define QM_SPINLOCK_UNLOCK()
#endif /* defined(QM_QUARK_SE) */
#endif /* __SPINLOCK_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,25 +27,19 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "qm_power.h"
#include "qm_scss.h"
#include "power_states.h"
#include "clk.h"
#include "qm_comparator.h"
#if (QUARK_D2000)
#include "qm_adc.h"
#endif
#if (HAS_RAR)
#include "qm_rar.h"
#endif
#include "rar.h"
void cpu_halt(void)
{
__asm__("hlt");
__asm__ __volatile__("hlt");
}
#if (QUARK_D2000)
static void clear_all_pending_interrupts(void)
{
/* Clear comparator interrupts. */
@ -64,7 +58,6 @@ static void clear_all_pending_interrupts(void)
void soc_sleep(void)
{
/* Variables to save register values. */
qm_ac_config_t ac_cfg;
uint32_t ac_power_save;
uint32_t clk_gate_save = QM_SCSS_CCU->ccu_periph_clk_gate_ctl;
uint32_t sys_clk_ctl_save = QM_SCSS_CCU->ccu_sys_clk_ctl;
@ -77,10 +70,8 @@ void soc_sleep(void)
qm_adc_set_mode(QM_ADC_0, QM_ADC_MODE_PWR_DOWN);
/* Turn off high power comparators. */
qm_ac_get_config(&ac_cfg);
ac_power_save = ac_cfg.power;
ac_cfg.power &= QM_AC_HP_COMPARATORS_MASK;
qm_ac_set_config(&ac_cfg);
ac_power_save = QM_SCSS_CMP->cmp_pwr;
QM_SCSS_CMP->cmp_pwr &= QM_AC_HP_COMPARATORS_MASK;
/*
* Program WAKE_MASK.WAKE_MASK[31:0],
@ -155,15 +146,13 @@ void soc_sleep(void)
QM_SCSS_CCU->osc0_cfg1 = osc0_cfg_save;
QM_SCSS_CCU->ccu_periph_clk_gate_ctl = clk_gate_save;
ac_cfg.power = ac_power_save;
qm_ac_set_config(&ac_cfg);
QM_SCSS_CMP->cmp_pwr = ac_power_save;
QM_ADC->adc_op_mode = adc_mode_save;
}
void soc_deep_sleep(void)
{
/* Variables to save register values. */
qm_ac_config_t ac_cfg;
uint32_t ac_power_save;
uint32_t clk_gate_save = QM_SCSS_CCU->ccu_periph_clk_gate_ctl;
uint32_t sys_clk_ctl_save = QM_SCSS_CCU->ccu_sys_clk_ctl;
@ -189,10 +178,8 @@ void soc_deep_sleep(void)
qm_adc_set_mode(QM_ADC_0, QM_ADC_MODE_DEEP_PWR_DOWN);
/* Turn off high power comparators. */
qm_ac_get_config(&ac_cfg);
ac_power_save = ac_cfg.power;
ac_cfg.power &= QM_AC_HP_COMPARATORS_MASK;
qm_ac_set_config(&ac_cfg);
ac_power_save = QM_SCSS_CMP->cmp_pwr;
QM_SCSS_CMP->cmp_pwr &= QM_AC_HP_COMPARATORS_MASK;
/* Disable all peripheral clocks. */
clk_periph_disable(CLK_PERIPH_REGISTER);
@ -229,7 +216,7 @@ void soc_deep_sleep(void)
/* Enable low voltage mode for flash controller. */
/* FlashCtrl.CTRL.LVE_MODE = 1; */
QM_FLASH[QM_FLASH_0].ctrl |= QM_FLASH_LVE_MODE;
QM_FLASH[QM_FLASH_0]->ctrl |= QM_FLASH_LVE_MODE;
/* Select 1.35V for voltage regulator. */
/* SCSS.AON_VR.VSEL = 0xB; */
@ -296,7 +283,7 @@ void soc_deep_sleep(void)
QM_SCSS_CCU->osc0_cfg0 &= ~QM_SI_OSC_1V2_MODE;
/* FlashCtrl.CTRL.LVE_MODE = 0; */
QM_FLASH[QM_FLASH_0].ctrl &= ~QM_FLASH_LVE_MODE;
QM_FLASH[QM_FLASH_0]->ctrl &= ~QM_FLASH_LVE_MODE;
/* Restore all previous values. */
QM_SCSS_CCU->ccu_sys_clk_ctl = sys_clk_ctl_save;
@ -322,8 +309,7 @@ void soc_deep_sleep(void)
QM_SCSS_CCU->ccu_periph_clk_gate_ctl = clk_gate_save;
QM_SCSS_CCU->osc1_cfg0 = osc1_cfg_save;
ac_cfg.power = ac_power_save;
qm_ac_set_config(&ac_cfg);
QM_SCSS_CMP->cmp_pwr = ac_power_save;
QM_ADC->adc_op_mode = adc_mode_save;
QM_SCSS_PMUX->pmux_slew[0] = pmux_slew_save;
@ -333,17 +319,3 @@ void soc_deep_sleep(void)
QM_SCSS_CCU->wake_mask = SET_ALL_BITS;
QM_SCSS_GP->gps1 &= ~QM_SCSS_GP_POWER_STATE_DEEP_SLEEP;
}
#elif(QUARK_SE)
void soc_sleep(void)
{
/* NOTE: Add Quark SE specific sleep code. */
cpu_halt();
}
void soc_deep_sleep(void)
{
/* NOTE: Add Quark SE specific deep sleep code. */
cpu_halt();
}
#endif

View file

@ -0,0 +1,91 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FLASH_LAYOUT_H__
#define __FLASH_LAYOUT_H__
/**
* Flash Layout for Quark D2000 Microcontrollers.
*
* @defgroup groupD2000Flash Quark D2000 Flash Layout
* @{
*/
typedef struct {
QM_RW uint16_t osc_trim_16mhz; /**< 16MHz Oscillator trim code. */
QM_RW uint16_t osc_trim_32mhz; /**< 32MHz Oscillator trim code. */
QM_RW uint16_t osc_trim_4mhz; /**< 4MHz Oscillator trim code. */
QM_RW uint16_t osc_trim_8mhz; /**< 8MHz Oscillator trim code. */
} qm_flash_otp_trim_t;
#if (UNIT_TEST)
extern uint8_t test_flash_page[0x800];
#define QM_FLASH_OTP_TRIM_CODE_BASE (&test_flash_page[0])
#else
#define QM_FLASH_OTP_TRIM_CODE_BASE (0x4)
#endif
#define QM_FLASH_OTP_TRIM_CODE \
((qm_flash_otp_trim_t *)QM_FLASH_OTP_TRIM_CODE_BASE)
#define QM_FLASH_OTP_SOC_DATA_VALID (0x24535021) /**< $SP! */
#define QM_FLASH_OTP_TRIM_MAGIC (QM_FLASH_OTP_SOC_DATA_VALID)
typedef union {
struct trim_fields {
QM_RW uint16_t
osc_trim_32mhz; /**< 32MHz Oscillator trim code. */
QM_RW uint16_t
osc_trim_16mhz; /**< 16MHz Oscillator trim code. */
QM_RW uint16_t osc_trim_8mhz; /**< 8MHz Oscillator trim code. */
QM_RW uint16_t osc_trim_4mhz; /**< 4MHz Oscillator trim code. */
} fields;
QM_RW uint32_t osc_trim_u32[2]; /**< Oscillator trim code array.*/
QM_RW uint16_t osc_trim_u16[4]; /**< Oscillator trim code array.*/
} qm_flash_data_trim_t;
#if (UNIT_TEST)
#define QM_FLASH_DATA_TRIM_BASE (&test_flash_page[100])
#define QM_FLASH_DATA_TRIM_OFFSET (100)
#else
#define QM_FLASH_DATA_TRIM_BASE (QM_FLASH_REGION_DATA_0_BASE)
#define QM_FLASH_DATA_TRIM_OFFSET ((uint32_t)QM_FLASH_DATA_TRIM_BASE & 0x3FFFF)
#endif
#define QM_FLASH_DATA_TRIM ((qm_flash_data_trim_t *)QM_FLASH_DATA_TRIM_BASE)
#define QM_FLASH_DATA_TRIM_CODE (&QM_FLASH_DATA_TRIM->fields)
#define QM_FLASH_DATA_TRIM_REGION QM_FLASH_REGION_DATA
#define QM_FLASH_TRIM_PRESENT_MASK (0xFC00)
#define QM_FLASH_TRIM_PRESENT (0x7C00)
/**
* @}
*/
#endif /* __FLASH_LAYOUT_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -27,25 +27,29 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QM_POWER_H__
#define __QM_POWER_H__
#ifndef __POWER_STATES_H__
#define __POWER_STATES_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
/**
* Power mode control for Quark Microcontrollers.
* Power mode control for Quark D2000 Microcontrollers.
*
* @defgroup groupPower Power state
* @defgroup groupD2000Power Quark D2000 Power states
* @{
*/
/**
* Put CPU in halt state.
*
* Halts the CPU until next interrupt or reset.
*/
void cpu_halt(void);
/**
* Put SoC to sleep.
*
* Enter into sleep mode. The hybrid oscillator is disabled, most peripherals
* are disabled and the voltage regulator is set into retention mode.
* The following peripherals are disabled in this mode:
@ -63,12 +67,12 @@ void cpu_halt(void);
* - AON Timers
* - RTC
* - Low power comparators
*
* @brief Put SoC to sleep
*/
void soc_sleep();
/**
* Put SoC to deep sleep.
*
* Enter into deep sleep mode. All clocks are gated. The only way to return
* from this is to have an interrupt trigger on the low power comparators.
*/
@ -78,4 +82,4 @@ void soc_deep_sleep();
* @}
*/
#endif /* __QM_POWER_H__ */
#endif /* __POWER_STATES_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -97,6 +97,12 @@ qm_scss_ccu_reg_t test_scss_ccu;
#define OSC0_CFG0_OSC0_XTAL_COUNT_VALUE_MASK (0x00600000)
#define OSC0_CFG0_OSC0_XTAL_COUNT_VALUE_OFFS (21)
/* Silicon Oscillator parameters */
#define OSC0_CFG1_FTRIMOTP_MASK (0x3FF00000)
#define OSC0_CFG1_FTRIMOTP_OFFS (20)
#define OSC0_CFG1_SI_FREQ_SEL_MASK (0x00000300)
#define OSC0_CFG1_SI_FREQ_SEL_OFFS (8)
#define QM_OSC0_LOCK_SI BIT(0)
#define QM_OSC0_LOCK_XTAL BIT(1)
#define QM_OSC0_EN_SI_OSC BIT(1)
@ -110,6 +116,7 @@ qm_scss_ccu_reg_t test_scss_ccu;
#define QM_CCU_PERIPH_PCLK_DIV_OFFSET (1)
#define QM_CCU_PERIPH_PCLK_DIV_EN BIT(0)
#define QM_CCU_SYS_CLK_DIV_EN BIT(7)
#define QM_CCU_SYS_CLK_DIV_MASK (0x00000700)
#define QM_CCU_SYS_CLK_DIV_DEF_MASK (0xFFFFF47F)
#define QM_OSC0_SI_FREQ_SEL_DEF_MASK (0xFFFFFCFF)
#define QM_OSC0_SI_FREQ_SEL_4MHZ (3 >> 8)
@ -124,6 +131,7 @@ qm_scss_ccu_reg_t test_scss_ccu;
#define QM_CCU_GPIO_DB_CLK_DIV_DEF_MASK (0xFFFFFFE1)
#define QM_CCU_EXT_CLK_DIV_DEF_MASK (0xFFFFFFE3)
#define QM_CCU_RTC_CLK_DIV_DEF_MASK (0xFFFFFF83)
#define QM_CCU_DMA_CLK_EN BIT(6)
#define QM_CCU_WAKE_MASK_COMPARATOR_BIT BIT(14)
@ -372,6 +380,9 @@ qm_scss_info_reg_t test_scss_info;
* The vector numbers must be defined without arithmetic expressions nor
* parentheses because they are expanded as token concatenation.
*/
#define QM_INT_VECTOR_DOUBLE_FAULT 8
#define QM_IRQ_RTC_0 (2)
#define QM_IRQ_RTC_0_MASK_OFFSET (12)
#define QM_IRQ_RTC_0_VECTOR 34
@ -396,6 +407,10 @@ qm_scss_info_reg_t test_scss_info;
#define QM_IRQ_ADC_0_MASK_OFFSET (34)
#define QM_IRQ_ADC_0_VECTOR 41
#define QM_IRQ_ADC_PWR_0 (19)
#define QM_IRQ_ADC_PWR_0_MASK_OFFSET (33)
#define QM_IRQ_ADC_PWR_0_VECTOR 51
#define QM_IRQ_WDT_0 (16)
#define QM_IRQ_WDT_0_MASK_OFFSET (13)
#define QM_IRQ_WDT_0_VECTOR 48
@ -429,6 +444,18 @@ qm_scss_info_reg_t test_scss_info;
#define QM_IRQ_UART_1_MASK_OFFSET (7)
#define QM_IRQ_UART_1_VECTOR 38
#define QM_IRQ_DMA_0 (13)
#define QM_IRQ_DMA_0_MASK_OFFSET (14)
#define QM_IRQ_DMA_0_VECTOR 45
#define QM_IRQ_DMA_1 (12)
#define QM_IRQ_DMA_1_MASK_OFFSET (15)
#define QM_IRQ_DMA_1_VECTOR 44
#define QM_IRQ_DMA_ERR (0)
#define QM_IRQ_DMA_ERR_MASK_OFFSET (28)
#define QM_IRQ_DMA_ERR_VECTOR 32
/**
* Number of PWM/Timer controllers.
*/
@ -547,15 +574,17 @@ typedef struct {
} qm_uart_reg_t;
#if (UNIT_TEST)
qm_uart_reg_t test_uart[QM_UART_NUM];
#define QM_UART ((qm_uart_reg_t *)(&test_uart))
qm_uart_reg_t test_uart_instance;
qm_uart_reg_t *test_uart[QM_UART_NUM];
#define QM_UART test_uart
#else
/** UART register base address */
#define QM_UART_BASE (0xB0002000)
#define QM_UART_0_BASE (0xB0002000)
#define QM_UART_1_BASE (0xB0002400)
/** UART register block */
#define QM_UART ((qm_uart_reg_t *)QM_UART_BASE)
extern qm_uart_reg_t *qm_uart[QM_UART_NUM];
#define QM_UART qm_uart
#endif
/**
@ -612,6 +641,50 @@ extern qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM];
#define QM_SPI_SLV_BASE (0xB0001800)
#endif
/* SPI Ctrlr0 register */
#define QM_SPI_CTRLR0_DFS_32_MASK (0x001F0000)
#define QM_SPI_CTRLR0_TMOD_MASK (0x00000300)
#define QM_SPI_CTRLR0_SCPOL_SCPH_MASK (0x000000C0)
#define QM_SPI_CTRLR0_FRF_MASK (0x00000030)
#define QM_SPI_CTRLR0_DFS_32_OFFSET (16)
#define QM_SPI_CTRLR0_TMOD_OFFSET (8)
#define QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET (6)
#define QM_SPI_CTRLR0_FRF_OFFSET (4)
/* SPI SSI Enable register */
#define QM_SPI_SSIENR_SSIENR BIT(0)
/* SPI Status register */
#define QM_SPI_SR_BUSY BIT(0)
#define QM_SPI_SR_TFNF BIT(1)
#define QM_SPI_SR_TFE BIT(2)
/* SPI Interrupt Mask register */
#define QM_SPI_IMR_MASK_ALL (0x00)
#define QM_SPI_IMR_TXEIM BIT(0)
#define QM_SPI_IMR_TXOIM BIT(1)
#define QM_SPI_IMR_RXUIM BIT(2)
#define QM_SPI_IMR_RXOIM BIT(3)
#define QM_SPI_IMR_RXFIM BIT(4)
/* SPI Interrupt Status register */
#define QM_SPI_ISR_TXEIS BIT(0)
#define QM_SPI_ISR_TXOIS BIT(1)
#define QM_SPI_ISR_RXUIS BIT(2)
#define QM_SPI_ISR_RXOIS BIT(3)
#define QM_SPI_ISR_RXFIS BIT(4)
/* SPI Raw Interrupt Status register */
#define QM_SPI_RISR_TXEIR BIT(0)
#define QM_SPI_RISR_TXOIR BIT(1)
#define QM_SPI_RISR_RXUIR BIT(2)
#define QM_SPI_RISR_RXOIR BIT(3)
#define QM_SPI_RISR_RXFIR BIT(4)
/* SPI DMA control */
#define QM_SPI_DMACR_RDMAE BIT(0)
#define QM_SPI_DMACR_TDMAE BIT(1)
/**
* Number of RTC controllers.
*/
@ -706,16 +779,18 @@ typedef struct {
} qm_i2c_reg_t;
#if (UNIT_TEST)
qm_i2c_reg_t test_i2c[QM_I2C_NUM];
qm_i2c_reg_t test_i2c_instance[QM_I2C_NUM];
qm_i2c_reg_t *test_i2c[QM_I2C_NUM];
#define QM_I2C ((qm_i2c_reg_t *)(&test_i2c))
#define QM_I2C test_i2c
#else
/** I2C Master register base address */
#define QM_I2C_BASE (0xB0002800)
#define QM_I2C_0_BASE (0xB0002800)
/** I2C register block */
#define QM_I2C ((qm_i2c_reg_t *)QM_I2C_BASE)
extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C qm_i2c
#endif
#define QM_I2C_IC_ENABLE_CONTROLLER_EN BIT(0)
@ -734,6 +809,7 @@ qm_i2c_reg_t test_i2c[QM_I2C_NUM];
#define QM_I2C_IC_CON_RESTART_EN BIT(5)
#define QM_I2C_IC_DATA_CMD_READ BIT(8)
#define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9)
#define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF)
#define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F)
#define QM_I2C_IC_TX_ABRT_SOURCE_ARB_LOST BIT(12)
@ -765,6 +841,10 @@ qm_i2c_reg_t test_i2c[QM_I2C_NUM];
#define QM_I2C_FIFO_SIZE (16)
/* I2C DMA */
#define QM_I2C_IC_DMA_CR_RX_ENABLE BIT(0)
#define QM_I2C_IC_DMA_CR_TX_ENABLE BIT(1)
/**
* Number of GPIO controllers.
*/
@ -875,6 +955,7 @@ qm_adc_reg_t test_adc;
#define QM_ADC_INTR_STATUS_FO BIT(1)
#define QM_ADC_INTR_STATUS_CONT_CC BIT(2)
/* Operating mode */
#define QM_ADC_OP_MODE_IE BIT(27)
#define QM_ADC_OP_MODE_DELAY_OFFSET (0x3)
#define QM_ADC_OP_MODE_DELAY_MASK (0xFFF8)
#define QM_ADC_OP_MODE_OM_MASK (0x7)
@ -904,19 +985,20 @@ typedef struct {
#define QM_FLASH_REGION_DATA_0_SIZE (0x1000)
#define QM_FLASH_REGION_DATA_0_PAGES (0x02)
#define QM_FLASH_PAGE_MASK (0xF800)
#if (UNIT_TEST)
qm_flash_reg_t test_flash;
uint32_t test_flash_page[0x200];
qm_flash_reg_t test_flash_instance;
qm_flash_reg_t *test_flash[QM_FLASH_NUM];
uint8_t test_flash_page[0x800];
#define QM_FLASH_BASE ((uint32_t *)&test_flash)
#define QM_FLASH ((qm_flash_reg_t *)(&test_flash))
#define QM_FLASH test_flash
#define QM_FLASH_REGION_DATA_0_BASE (test_flash_page)
#define QM_FLASH_REGION_SYS_0_BASE (test_flash_page)
#define QM_FLASH_REGION_OTP_0_BASE (test_flash_page)
#define QM_FLASH_PAGE_MASK (0xCFF)
#define QM_FLASH_MAX_ADDR (0xFFFFFFFF)
#else
/** Flash physical address mappings */
@ -924,18 +1006,22 @@ uint32_t test_flash_page[0x200];
#define QM_FLASH_REGION_SYS_0_BASE (0x00180000)
#define QM_FLASH_REGION_OTP_0_BASE (0x00000000)
#define QM_FLASH_PAGE_MASK (0xF800)
#define QM_FLASH_MAX_ADDR (0x8000)
/** Flash controller register base address */
#define QM_FLASH_BASE (0xB0100000)
#define QM_FLASH_BASE_0 (0xB0100000)
/** Flash controller register block */
#define QM_FLASH ((qm_flash_reg_t *)QM_FLASH_BASE)
extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
#define QM_FLASH qm_flash
#endif
#define QM_FLASH_REGION_DATA_BASE_OFFSET (0x04)
#define QM_FLASH_MAX_WAIT_STATES (0xF)
#define QM_FLASH_MAX_US_COUNT (0x3F)
#define QM_FLASH_MAX_ADDR (0x8000)
#define QM_FLASH_MAX_PAGE_NUM (QM_FLASH_MAX_ADDR / (4 * QM_FLASH_PAGE_SIZE))
#define QM_FLASH_MAX_PAGE_NUM \
(QM_FLASH_MAX_ADDR / (4 * QM_FLASH_PAGE_SIZE_DWORDS))
#define QM_FLASH_LVE_MODE BIT(5)
/**
@ -1035,7 +1121,7 @@ qm_mvic_reg_t test_mvic;
#define QM_INT_CONTROLLER QM_MVIC
/* Signal the interrupt controller that the interrupt was handled. The vector
* argument is ignored. */
#if !defined(USE_ISR_EOI)
#if defined(ISR_HANDLED)
#define QM_ISR_EOI(vector)
#else
#define QM_ISR_EOI(vector) (QM_INT_CONTROLLER->eoi.reg = 0)
@ -1056,6 +1142,228 @@ qm_ioapic_reg_t test_ioapic;
#define QM_IOAPIC ((qm_ioapic_reg_t *)QM_IOAPIC_BASE)
#endif
/** DMA */
/**
* DMA instances
*/
typedef enum {
QM_DMA_0, /**< DMA controller id. */
QM_DMA_NUM /**< Number of DMA controllers. */
} qm_dma_t;
/**
* DMA channel IDs
*/
typedef enum {
QM_DMA_CHANNEL_0 = 0, /**< DMA channel id for channel 0 */
QM_DMA_CHANNEL_1, /**< DMA channel id for channel 1 */
QM_DMA_CHANNEL_NUM /**< Number of DMA channels */
} qm_dma_channel_id_t;
/**
* DMA hardware handshake interfaces
*/
typedef enum {
DMA_HW_IF_UART_A_TX = 0x0, /**< UART_A_TX */
DMA_HW_IF_UART_A_RX = 0x1, /**< UART_A_RX */
DMA_HW_IF_UART_B_TX = 0x2, /**< UART_B_TX*/
DMA_HW_IF_UART_B_RX = 0x3, /**< UART_B_RX */
DMA_HW_IF_SPI_MASTER_0_TX = 0x4, /**< SPI_Master_0_TX */
DMA_HW_IF_SPI_MASTER_0_RX = 0x5, /**< SPI_Master_0_RX */
DMA_HW_IF_SPI_SLAVE_TX = 0x8, /**< SPI_Slave_TX */
DMA_HW_IF_SPI_SLAVE_RX = 0x9, /**< SPI_Slave_RX */
DMA_HW_IF_I2C_MASTER_0_TX = 0xc, /**< I2C_Master_0_TX */
DMA_HW_IF_I2C_MASTER_0_RX = 0xd, /**< I2C_Master_0_RX */
} qm_dma_handshake_interface_t;
/**
* DMA channel register block type
*/
typedef struct {
QM_RW uint32_t sar_low; /**< SAR */
QM_RW uint32_t sar_high; /**< SAR */
QM_RW uint32_t dar_low; /**< DAR */
QM_RW uint32_t dar_high; /**< DAR */
QM_RW uint32_t llp_low; /**< LLP */
QM_RW uint32_t llp_high; /**< LLP */
QM_RW uint32_t ctrl_low; /**< CTL */
QM_RW uint32_t ctrl_high; /**< CTL */
QM_RW uint32_t src_stat_low; /**< SSTAT */
QM_RW uint32_t src_stat_high; /**< SSTAT */
QM_RW uint32_t dst_stat_low; /**< DSTAT */
QM_RW uint32_t dst_stat_high; /**< DSTAT */
QM_RW uint32_t src_stat_addr_low; /**< SSTATAR */
QM_RW uint32_t src_stat_addr_high; /**< SSTATAR */
QM_RW uint32_t dst_stat_addr_low; /**< DSTATAR */
QM_RW uint32_t dst_stat_addr_high; /**< DSTATAR */
QM_RW uint32_t cfg_low; /**< CFG */
QM_RW uint32_t cfg_high; /**< CFG */
QM_RW uint32_t src_sg_low; /**< SGR */
QM_RW uint32_t src_sg_high; /**< SGR */
QM_RW uint32_t dst_sg_low; /**< DSR */
QM_RW uint32_t dst_sg_high; /**< DSR */
} qm_dma_chan_reg_t;
/** DMA channel control register offsets and masks */
#define QM_DMA_CTL_L_INT_EN_MASK BIT(0)
#define QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET (1)
#define QM_DMA_CTL_L_DST_TR_WIDTH_MASK (0x7 << QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET)
#define QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET (4)
#define QM_DMA_CTL_L_SRC_TR_WIDTH_MASK (0x7 << QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET)
#define QM_DMA_CTL_L_DINC_OFFSET (7)
#define QM_DMA_CTL_L_DINC_MASK (0x3 << QM_DMA_CTL_L_DINC_OFFSET)
#define QM_DMA_CTL_L_SINC_OFFSET (9)
#define QM_DMA_CTL_L_SINC_MASK (0x3 << QM_DMA_CTL_L_SINC_OFFSET)
#define QM_DMA_CTL_L_DEST_MSIZE_OFFSET (11)
#define QM_DMA_CTL_L_DEST_MSIZE_MASK (0x7 << QM_DMA_CTL_L_DEST_MSIZE_OFFSET)
#define QM_DMA_CTL_L_SRC_MSIZE_OFFSET (14)
#define QM_DMA_CTL_L_SRC_MSIZE_MASK (0x7 << QM_DMA_CTL_L_SRC_MSIZE_OFFSET)
#define QM_DMA_CTL_L_TT_FC_OFFSET (20)
#define QM_DMA_CTL_L_TT_FC_MASK (0x7 << QM_DMA_CTL_L_TT_FC_OFFSET)
#define QM_DMA_CTL_L_LLP_DST_EN_MASK BIT(27)
#define QM_DMA_CTL_L_LLP_SRC_EN_MASK BIT(28)
#define QM_DMA_CTL_H_BLOCK_TS_OFFSET (0)
#define QM_DMA_CTL_H_BLOCK_TS_MASK (0xfff << QM_DMA_CTL_H_BLOCK_TS_OFFSET)
#define QM_DMA_CTL_H_BLOCK_TS_MAX 4095
#define QM_DMA_CTL_H_BLOCK_TS_MIN 1
/** DMA channel config register offsets and masks */
#define QM_DMA_CFG_L_CH_SUSP_MASK BIT(8)
#define QM_DMA_CFG_L_FIFO_EMPTY_MASK BIT(9)
#define QM_DMA_CFG_L_HS_SEL_DST_OFFSET 10
#define QM_DMA_CFG_L_HS_SEL_DST_MASK BIT(QM_DMA_CFG_L_HS_SEL_DST_OFFSET)
#define QM_DMA_CFG_L_HS_SEL_SRC_OFFSET 11
#define QM_DMA_CFG_L_HS_SEL_SRC_MASK BIT(QM_DMA_CFG_L_HS_SEL_SRC_OFFSET)
#define QM_DMA_CFG_L_DST_HS_POL_OFFSET 18
#define QM_DMA_CFG_L_DST_HS_POL_MASK BIT(QM_DMA_CFG_L_DST_HS_POL_OFFSET)
#define QM_DMA_CFG_L_SRC_HS_POL_OFFSET 19
#define QM_DMA_CFG_L_SRC_HS_POL_MASK BIT(QM_DMA_CFG_L_SRC_HS_POL_OFFSET)
#define QM_DMA_CFG_L_RELOAD_SRC_MASK BIT(30)
#define QM_DMA_CFG_L_RELOAD_DST_MASK BIT(31)
#define QM_DMA_CFG_H_SRC_PER_OFFSET (7)
#define QM_DMA_CFG_H_SRC_PER_MASK (0xf << QM_DMA_CFG_H_SRC_PER_OFFSET)
#define QM_DMA_CFG_H_DEST_PER_OFFSET (11)
#define QM_DMA_CFG_H_DEST_PER_MASK (0xf << QM_DMA_CFG_H_DEST_PER_OFFSET)
/**
 * DMA interrupt register block.
 *
 * Five interrupt sources (transfer complete, block complete, source
 * transaction complete, destination transaction complete, error), each
 * exposed through four views: raw status, masked status, mask, and
 * write-to-clear. Every register is split into low/high 32-bit words.
 */
typedef struct {
	QM_RW uint32_t raw_tfr_low;	     /**< RawTfr */
	QM_RW uint32_t raw_tfr_high;	     /**< RawTfr */
	QM_RW uint32_t raw_block_low;	     /**< RawBlock */
	QM_RW uint32_t raw_block_high;	     /**< RawBlock */
	QM_RW uint32_t raw_src_trans_low;    /**< RawSrcTran */
	QM_RW uint32_t raw_src_trans_high;   /**< RawSrcTran */
	QM_RW uint32_t raw_dst_trans_low;    /**< RawDstTran */
	QM_RW uint32_t raw_dst_trans_high;   /**< RawDstTran */
	QM_RW uint32_t raw_err_low;	     /**< RawErr */
	QM_RW uint32_t raw_err_high;	     /**< RawErr */
	QM_RW uint32_t status_tfr_low;	     /**< StatusTfr */
	QM_RW uint32_t status_tfr_high;	     /**< StatusTfr */
	QM_RW uint32_t status_block_low;     /**< StatusBlock */
	QM_RW uint32_t status_block_high;    /**< StatusBlock */
	QM_RW uint32_t status_src_trans_low; /**< StatusSrcTran */
	QM_RW uint32_t status_src_trans_high; /**< StatusSrcTran */
	QM_RW uint32_t status_dst_trans_low; /**< StatusDstTran */
	QM_RW uint32_t status_dst_trans_high; /**< StatusDstTran */
	QM_RW uint32_t status_err_low;	     /**< StatusErr */
	QM_RW uint32_t status_err_high;	     /**< StatusErr */
	QM_RW uint32_t mask_tfr_low;	     /**< MaskTfr */
	QM_RW uint32_t mask_tfr_high;	     /**< MaskTfr */
	QM_RW uint32_t mask_block_low;	     /**< MaskBlock */
	QM_RW uint32_t mask_block_high;	     /**< MaskBlock */
	QM_RW uint32_t mask_src_trans_low;   /**< MaskSrcTran */
	QM_RW uint32_t mask_src_trans_high;  /**< MaskSrcTran */
	QM_RW uint32_t mask_dst_trans_low;   /**< MaskDstTran */
	QM_RW uint32_t mask_dst_trans_high;  /**< MaskDstTran */
	QM_RW uint32_t mask_err_low;	     /**< MaskErr */
	QM_RW uint32_t mask_err_high;	     /**< MaskErr */
	QM_RW uint32_t clear_tfr_low;	     /**< ClearTfr */
	QM_RW uint32_t clear_tfr_high;	     /**< ClearTfr */
	QM_RW uint32_t clear_block_low;	     /**< ClearBlock */
	QM_RW uint32_t clear_block_high;     /**< ClearBlock */
	QM_RW uint32_t clear_src_trans_low;  /**< ClearSrcTran */
	QM_RW uint32_t clear_src_trans_high; /**< ClearSrcTran */
	QM_RW uint32_t clear_dst_trans_low;  /**< ClearDstTran */
	QM_RW uint32_t clear_dst_trans_high; /**< ClearDstTran */
	QM_RW uint32_t clear_err_low;	     /**< ClearErr */
	QM_RW uint32_t clear_err_high;	     /**< ClearErr */
	/* Combined interrupt status over all five sources (read-only view). */
	QM_RW uint32_t status_int_low;	     /**< StatusInt */
	QM_RW uint32_t status_int_high;	     /**< StatusInt */
} qm_dma_int_reg_t;
/** DMA interrupt status register bits */
#define QM_DMA_INT_STATUS_TFR BIT(0)
#define QM_DMA_INT_STATUS_ERR BIT(4)
/**
 * DMA miscellaneous register block: controller configuration, channel
 * enable, ID and test registers (each split into low/high words).
 */
typedef struct {
	QM_RW uint32_t cfg_low;	    /**< DmaCfgReg */
	QM_RW uint32_t cfg_high;    /**< DmaCfgReg */
	QM_RW uint32_t chan_en_low; /**< ChEnReg */
	QM_RW uint32_t chan_en_high; /**< ChEnReg */
	QM_RW uint32_t id_low;	    /**< DmaIdReg */
	QM_RW uint32_t id_high;	    /**< DmaIdReg */
	QM_RW uint32_t test_low;    /**< DmaTestReg */
	QM_RW uint32_t test_high;   /**< DmaTestReg */
	QM_RW uint32_t reserved[4]; /**< Reserved */
} qm_dma_misc_reg_t;
/** Channel write enable in the misc channel enable register */
#define QM_DMA_MISC_CHAN_EN_WE_OFFSET (8)
/** Controller enable bit in the misc config register */
#define QM_DMA_MISC_CFG_DMA_EN BIT(0)
/**
 * DMA controller register map: eight per-channel register blocks, the
 * interrupt block, a reserved area (software handshake) and the
 * miscellaneous block.
 */
typedef struct {
	QM_RW qm_dma_chan_reg_t chan_reg[8]; /**< Channel Register */
	QM_RW qm_dma_int_reg_t int_reg;	     /**< Interrupt Register */
	QM_RW uint32_t reserved[12];	     /**< Reserved (SW HS) */
	QM_RW qm_dma_misc_reg_t misc_reg;    /**< Miscellaneous Register */
} qm_dma_reg_t;
#if (UNIT_TEST)
qm_dma_reg_t test_dma_instance[QM_DMA_NUM];
qm_dma_reg_t *test_dma[QM_DMA_NUM];
#define QM_DMA test_dma
#else
#define QM_DMA_BASE (0xB0700000)
extern qm_dma_reg_t *qm_dma[QM_DMA_NUM];
#define QM_DMA qm_dma
#endif
/**
 * Peripheral clock type.
 *
 * Bit flags for the peripheral clock gate/enable register; values may be
 * OR-ed together. Gaps in the bit positions are reserved.
 */
typedef enum {
	CLK_PERIPH_REGISTER = BIT(0), /**< Peripheral Clock Gate Enable. */
	CLK_PERIPH_CLK = BIT(1),      /**< Peripheral Clock Enable. */
	CLK_PERIPH_I2C_M0 = BIT(2),   /**< I2C Master 0 Clock Enable. */
	CLK_PERIPH_SPI_S = BIT(4),    /**< SPI Slave Clock Enable. */
	CLK_PERIPH_SPI_M0 = BIT(5),   /**< SPI Master 0 Clock Enable. */
	CLK_PERIPH_GPIO_INTERRUPT = BIT(7), /**< GPIO Interrupt Clock Enable. */
	CLK_PERIPH_GPIO_DB = BIT(8),	    /**< GPIO Debounce Clock Enable. */
	CLK_PERIPH_WDT_REGISTER = BIT(10),  /**< Watchdog Clock Enable. */
	CLK_PERIPH_RTC_REGISTER = BIT(11),  /**< RTC Clock Gate Enable. */
	CLK_PERIPH_PWM_REGISTER = BIT(12),  /**< PWM Clock Gate Enable. */
	CLK_PERIPH_GPIO_REGISTER = BIT(13), /**< GPIO Clock Gate Enable. */
	CLK_PERIPH_SPI_M0_REGISTER =
	    BIT(14), /**< SPI Master 0 Clock Gate Enable. */
	CLK_PERIPH_SPI_S_REGISTER =
	    BIT(16), /**< SPI Slave Clock Gate Enable. */
	CLK_PERIPH_UARTA_REGISTER = BIT(17), /**< UARTA Clock Gate Enable. */
	CLK_PERIPH_UARTB_REGISTER = BIT(18), /**< UARTB Clock Gate Enable. */
	CLK_PERIPH_I2C_M0_REGISTER =
	    BIT(19), /**< I2C Master 0 Clock Gate Enable. */
	CLK_PERIPH_ADC = BIT(22),	   /**< ADC Clock Enable. */
	CLK_PERIPH_ADC_REGISTER = BIT(23), /**< ADC Clock Gate Enable. */
	CLK_PERIPH_ALL = 0xCFFFFF /**< Quark D2000 peripherals Enable. */
} clk_periph_t;
/* Default mask values */
#define CLK_EXTERN_DIV_DEF_MASK (0xFFFFFFE3)
#define CLK_SYS_CLK_DIV_DEF_MASK (0xFFFFF87F)
@ -1064,6 +1372,16 @@ qm_ioapic_reg_t test_ioapic;
#define CLK_ADC_DIV_DEF_MASK (0xFC00FFFF)
#define CLK_PERIPH_DIV_DEF_MASK (0xFFFFFFF9)
/**
 * Version variables.
 *
 * ROM_VERSION_ADDRESS is the location of the ROM version word; unit tests
 * redirect it to a local stub variable.
 */
#if (UNIT_TEST)
uint32_t test_rom_version;
#define ROM_VERSION_ADDRESS (&test_rom_version)
#else
#define ROM_VERSION_ADDRESS (0x1FFC)
#endif
/**
@}
*/

View file

@ -0,0 +1,84 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "power_states.h"
#include "vreg.h"
#if (QM_SENSOR)
#include "qm_sensor_regs.h"
#endif
/**
 * Enable LPSS state entry.
 *
 * Sets the SS_LPS_EN flag in the CCU low-power clock control register so
 * the SoC may enter LPSS on the qualifying core sleep state combination.
 */
void power_soc_lpss_enable(void)
{
	QM_SCSS_CCU->ccu_lp_clk_ctl |= QM_SCSS_CCU_SS_LPS_EN;
}
/**
 * Disable LPSS state entry.
 *
 * Clears the SS_LPS_EN flag in the CCU low-power clock control register,
 * preventing LPSS entry.
 */
void power_soc_lpss_disable(void)
{
	QM_SCSS_CCU->ccu_lp_clk_ctl &= ~QM_SCSS_CCU_SS_LPS_EN;
}
/**
 * Enter SoC sleep state.
 *
 * Clears the low-power mode flag in the sleep configuration register and
 * sets SLPEN in PM1C, putting the SoC to sleep until the next wake event.
 */
void power_soc_sleep(void)
{
	/* Go to sleep */
	QM_SCSS_PMU->slp_cfg &= ~QM_SCSS_SLP_CFG_LPMODE_EN;
	QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN;
}
/**
 * Enter SoC deep sleep state.
 *
 * Switches the 1P8V and 3P3V platform regulators to linear mode, then
 * enables low-power sleep mode and sets SLPEN to start the transition.
 */
void power_soc_deep_sleep(void)
{
	/* Switch to linear regulators */
	vreg_plat1p8_set_mode(VREG_MODE_LINEAR);
	vreg_plat3p3_set_mode(VREG_MODE_LINEAR);

	/* Enable low power sleep mode */
	QM_SCSS_PMU->slp_cfg |= QM_SCSS_SLP_CFG_LPMODE_EN;
	QM_SCSS_PMU->pm1c |= QM_SCSS_PM1C_SLPEN;
}
#if (!QM_SENSOR)
/**
 * Enter Host C1 state.
 *
 * HLT gates the processor clock until the next host interrupt; nothing
 * else is turned off.
 */
void power_cpu_c1(void)
{
	__asm__ __volatile__("hlt");
}
/**
 * Enter Host C2 state (or SoC LPSS if enabled).
 *
 * Clears C2_LP_EN so the C2 request enters plain C2 rather than C2LP,
 * then triggers the transition by reading P_LVL2.
 */
void power_cpu_c2(void)
{
	QM_SCSS_CCU->ccu_lp_clk_ctl &= ~QM_SCSS_CCU_C2_LP_EN;

	/* Read P_LVL2 to trigger a C2 request */
	QM_SCSS_PMU->p_lvl2;
}
/**
 * Enter Host C2LP state (or SoC LPSS if enabled).
 *
 * Sets C2_LP_EN so the C2 request enters C2LP, then triggers the
 * transition by reading P_LVL2.
 */
void power_cpu_c2lp(void)
{
	QM_SCSS_CCU->ccu_lp_clk_ctl |= QM_SCSS_CCU_C2_LP_EN;

	/* Read P_LVL2 to trigger a C2 request */
	QM_SCSS_PMU->p_lvl2;
}
#endif

View file

@ -0,0 +1,94 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "vreg.h"
/* Index of each platform voltage regulator in the vreg[] register map. */
typedef enum { AON_VR = 0, PLAT3P3_VR, PLAT1P8_VR, HOST_VR, VREG_NUM } vreg_t;

/* Per-regulator control register, indexed by vreg_t. */
QM_RW uint32_t *vreg[VREG_NUM] = {
    &QM_SCSS_PMU->aon_vr, &QM_SCSS_PMU->plat3p3_vr, &QM_SCSS_PMU->plat1p8_vr,
    &QM_SCSS_PMU->host_vr};
static int vreg_set_mode(const vreg_t id, const vreg_mode_t mode)
{
QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);
uint32_t vr;
vr = *vreg[id];
switch (mode) {
case VREG_MODE_SWITCHING:
vr |= QM_SCSS_VR_EN;
vr &= ~QM_SCSS_VR_VREG_SEL;
break;
case VREG_MODE_LINEAR:
vr |= QM_SCSS_VR_EN;
vr |= QM_SCSS_VR_VREG_SEL;
break;
case VREG_MODE_SHUTDOWN:
vr &= ~QM_SCSS_VR_EN;
break;
default:
break;
}
*vreg[id] = vr;
while ((mode == VREG_MODE_SWITCHING) &&
(*vreg[id] & QM_SCSS_VR_ROK) == 0) {
}
return 0;
}
/**
 * Set the AON (always-on) voltage regulator mode.
 *
 * Switching mode is rejected for this regulator in addition to
 * out-of-range values.
 *
 * @param[in] mode Operating mode to apply.
 * @return 0 on success, -EINVAL if a QM_CHECK rejects the mode.
 */
int vreg_aon_set_mode(const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);
	QM_CHECK(mode != VREG_MODE_SWITCHING, -EINVAL);

	return vreg_set_mode(AON_VR, mode);
}
/**
 * Set the platform 3.3V voltage regulator mode.
 *
 * @param[in] mode Operating mode to apply.
 * @return 0 on success, -EINVAL if QM_CHECK rejects the mode.
 */
int vreg_plat3p3_set_mode(const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);

	return vreg_set_mode(PLAT3P3_VR, mode);
}
/**
 * Set the platform 1.8V voltage regulator mode.
 *
 * @param[in] mode Operating mode to apply.
 * @return 0 on success, -EINVAL if QM_CHECK rejects the mode.
 */
int vreg_plat1p8_set_mode(const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);

	return vreg_set_mode(PLAT1P8_VR, mode);
}
/**
 * Set the host voltage regulator mode.
 *
 * @param[in] mode Operating mode to apply.
 * @return 0 on success, -EINVAL if QM_CHECK rejects the mode.
 */
int vreg_host_set_mode(const vreg_mode_t mode)
{
	QM_CHECK(mode < VREG_MODE_NUM, -EINVAL);

	return vreg_set_mode(HOST_VR, mode);
}

View file

@ -0,0 +1,94 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FLASH_LAYOUT_H__
#define __FLASH_LAYOUT_H__
/**
* Flash Layout for Quark SE Microcontrollers.
*
* @defgroup groupSEFlash Quark SE Flash Layout
* @{
*/
/**
 * OTP flash trim-code block: magic/version header followed by one
 * oscillator trim code per supported frequency.
 */
typedef struct {
	QM_RW uint32_t magic;	 /**< Magic Number. */
	QM_RW uint16_t version;	 /**< 0x0100. */
	QM_RW uint16_t reserved; /**< Reserved. */
	QM_RW uint16_t osc_trim_32mhz; /**< 32MHz Oscillator trim code. */
	QM_RW uint16_t osc_trim_16mhz; /**< 16MHz Oscillator trim code. */
	QM_RW uint16_t osc_trim_8mhz;  /**< 8MHz Oscillator trim code. */
	QM_RW uint16_t osc_trim_4mhz;  /**< 4MHz Oscillator trim code. */
} qm_flash_otp_trim_t;
#if (UNIT_TEST)
extern uint8_t test_flash_page[0x800];
#define QM_FLASH_OTP_TRIM_CODE_BASE (&test_flash_page[0])
#else
#define QM_FLASH_OTP_TRIM_CODE_BASE (0xFFFFE1F0)
#endif
#define QM_FLASH_OTP_TRIM_CODE \
((qm_flash_otp_trim_t *)QM_FLASH_OTP_TRIM_CODE_BASE)
#define QM_FLASH_OTP_SOC_DATA_VALID (0x24535021) /**< $SP! */
#define QM_FLASH_OTP_TRIM_MAGIC (QM_FLASH_OTP_TRIM_CODE->magic)
/**
 * Overlay views of the flash trim-code data block: named fields, or raw
 * 32-bit / 16-bit arrays over the same storage.
 */
typedef union {
	struct trim_fields {
		QM_RW uint16_t
		    osc_trim_32mhz; /**< 32MHz Oscillator trim code. */
		QM_RW uint16_t
		    osc_trim_16mhz; /**< 16MHz Oscillator trim code. */
		QM_RW uint16_t osc_trim_8mhz; /**< 8MHz Oscillator trim code. */
		QM_RW uint16_t osc_trim_4mhz; /**< 4MHz Oscillator trim code. */
	} fields;
	QM_RW uint32_t osc_trim_u32[2]; /**< Oscillator trim code array.*/
	/* NOTE(review): fields and osc_trim_u32 both span 8 bytes, but this
	 * view has only 2 x uint16_t (4 bytes) — verify whether it should be
	 * [4] to alias all four trim codes. */
	QM_RW uint16_t osc_trim_u16[2]; /**< Oscillator trim code array.*/
} qm_flash_data_trim_t;
#if (UNIT_TEST)
#define QM_FLASH_DATA_TRIM_BASE (&test_flash_page[100])
#define QM_FLASH_DATA_TRIM_OFFSET (100)
#else
#define QM_FLASH_DATA_TRIM_BASE (0x4002F000)
#define QM_FLASH_DATA_TRIM_OFFSET ((uint32_t)QM_FLASH_DATA_TRIM_BASE & 0x3FFFF)
#endif
#define QM_FLASH_DATA_TRIM ((qm_flash_data_trim_t *)QM_FLASH_DATA_TRIM_BASE)
#define QM_FLASH_DATA_TRIM_CODE (&QM_FLASH_DATA_TRIM->fields)
#define QM_FLASH_DATA_TRIM_REGION QM_FLASH_REGION_SYS
#define QM_FLASH_TRIM_PRESENT_MASK (0xFC00)
#define QM_FLASH_TRIM_PRESENT (0x0000)
/**
* @}
*/
#endif /* __FLASH_LAYOUT_H__ */

View file

@ -0,0 +1,178 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __POWER_STATES_H__
#define __POWER_STATES_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
/**
* SoC Power mode control for Quark SE Microcontrollers.
*
* @defgroup groupSoCPower Quark SE SoC Power states
* @{
*/
/**
* Enter SoC sleep state.
*
* Put the SoC into sleep state until next SoC wake event.
*
* - Core well is turned off
* - Always on well is on
* - Hybrid Clock is off
* - RTC Clock is on
*
* Possible SoC wake events are:
* - Low Power Comparator Interrupt
* - AON GPIO Interrupt
* - AON Timer Interrupt
* - RTC Interrupt
*/
void power_soc_sleep(void);
/**
* Enter SoC deep sleep state.
*
* Put the SoC into deep sleep state until next SoC wake event.
*
* - Core well is turned off
* - Always on well is on
* - Hybrid Clock is off
* - RTC Clock is on
*
* Possible SoC wake events are:
* - Low Power Comparator Interrupt
* - AON GPIO Interrupt
* - AON Timer Interrupt
* - RTC Interrupt
*
* This function puts 1P8V regulators and 3P3V into Linear Mode.
*/
void power_soc_deep_sleep(void);
/**
* Enable LPSS state entry.
*
* Put the SoC into LPSS on next C2/C2LP and SS2 state combination.<BR>
* SoC Hybrid Clock is gated in this state.<BR>
* Core Well Clocks are gated.<BR>
* RTC is the only clock remaining running.
*
* Possible SoC wake events are:
* - Low Power Comparator Interrupt
* - AON GPIO Interrupt
* - AON Timer Interrupt
* - RTC Interrupt
*/
void power_soc_lpss_enable(void);
/**
* Disable LPSS state entry.
*
* Clear LPSS enable flag.<BR>
* This will prevent entry in LPSS when cores are in C2/C2LP and SS2 states.
*/
void power_soc_lpss_disable(void);
/**
* @}
*/
#if (!QM_SENSOR)
/**
* Host Power mode control for Quark SE Microcontrollers.<BR>
* These functions cannot be called from the Sensor Subsystem.
*
* @defgroup groupSEPower Quark SE Host Power states
* @{
*/
/**
* Enter Host C1 state.
*
* Put the Host into C1.<BR>
* Processor Clock is gated in this state.<BR>
* Nothing is turned off in this state.
*
* A wake event causes the Host to transition to C0.<BR>
* A wake event is a host interrupt.
*/
void power_cpu_c1(void);
/**
* Enter Host C2 state or SoC LPSS state.
*
* Put the Host into C2.
* Processor Clock is gated in this state.
* All rails are supplied.
*
* This enables entry in LPSS if:
* - Sensor Subsystem is in SS2.
* - LPSS entry is enabled.
*
* If C2 is entered:
* - A wake event causes the Host to transition to C0.
* - A wake event is a host interrupt.
*
* If LPSS is entered:
* - LPSS wake events applies.
* - If the Sensor Subsystem wakes the SoC from LPSS, Host is back in C2.
*/
void power_cpu_c2(void);
/**
* Enter Host C2LP state or SoC LPSS state.
*
* Put the Host into C2LP.
* Processor Complex Clock is gated in this state.
* All rails are supplied.
*
* This enables entry in LPSS if:
* - Sensor Subsystem is in SS2.
* - LPSS is allowed.
*
* If C2LP is entered:
* - A wake event causes the Host to transition to C0.
* - A wake event is a Host interrupt.
*
* If LPSS is entered:
* - LPSS wake events apply if LPSS is entered.
* - If the Sensor Subsystem wakes the SoC from LPSS,
* Host transitions back to C2LP.
*/
void power_cpu_c2lp(void);
#endif
/**
* @}
*/
#endif /* __POWER_STATES_H__ */

View file

@ -0,0 +1,574 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SENSOR_REGISTERS_H__
#define __SENSOR_REGISTERS_H__
#include "qm_common.h"
/**
* Quark SE SoC Sensor Subsystem Registers.
*
* For detailed description please read the SOC datasheet.
*
* @defgroup groupSSSEREG SoC Registers (Sensor Subsystem)
* @{
*/
#if (UNIT_TEST)
#define QM_SS_BASE_AUX_REGS_NUM (0x701)
/* Peripherals auxiliary registers start at
* 0x80010000 and ends at 0x80018180 */
#define QM_SS_PERIPH_AUX_REGS_BASE (0x80010000)
#define QM_SS_PERIPH_AUX_REGS_SIZE (0x8181)
#define QM_SS_AUX_REGS_SIZE \
(QM_SS_BASE_AUX_REGS_NUM + QM_SS_PERIPH_AUX_REGS_SIZE)
uint32_t test_sensor_aux[QM_SS_AUX_REGS_SIZE];
#define __builtin_arc_lr(addr) \
({ \
uint32_t temp = addr; \
if (temp >= QM_SS_PERIPH_AUX_REGS_BASE) { \
temp -= QM_SS_PERIPH_AUX_REGS_BASE; \
temp += QM_SS_BASE_AUX_REGS_NUM; \
} \
(test_sensor_aux[temp]); \
})
#define __builtin_arc_sr(val, addr) \
({ \
uint32_t temp = addr; \
if (temp >= QM_SS_PERIPH_AUX_REGS_BASE) { \
temp -= QM_SS_PERIPH_AUX_REGS_BASE; \
temp += QM_SS_BASE_AUX_REGS_NUM; \
} \
(test_sensor_aux[temp] = val); \
})
#define __builtin_arc_kflag(sreg)
#define __builtin_arc_brk()
#define __builtin_arc_clri()
#define __builtin_arc_seti(val)
#define __builtin_arc_nop()
#endif
/**
* @name Bitwise Operation Macros
* @{
*/
/** Bitwise OR operation macro for registers in the auxiliary memory space. */
#define QM_SS_REG_AUX_OR(reg, mask)                                            \
	(__builtin_arc_sr(__builtin_arc_lr(reg) | (mask), reg))

/** Bitwise NAND operation macro for registers in the auxiliary memory space. */
/* Note: the mask argument must be fully parenthesized before complementing,
 * otherwise a compound mask such as BIT(0) | BIT(1) would have ~ applied to
 * its first operand only. */
#define QM_SS_REG_AUX_NAND(reg, mask)                                          \
	(__builtin_arc_sr(__builtin_arc_lr(reg) & ~(mask), reg))
/** @} */
/**
* @name Status and Control Register
* @{
*/
#define QM_SS_AUX_STATUS32 (0xA) /**< Sensor Subsystem status32 register. */
#define QM_SS_AUX_IC_CTRL (0x11) /**< Sensor Subsystem control register. */
/**< Sensor Subsystem cache invalidate register. */
#define QM_SS_AUX_IC_IVIL (0x19)
/**< Sensor Subsystem vector base register. */
#define QM_SS_AUX_INT_VECTOR_BASE (0x25)
/** @} */
/**
* @name SS Timer
* @{
*/
/* Sensor Subsystem timer register offsets. */
typedef enum {
	QM_SS_TIMER_COUNT = 0, /**< Timer count register. */
	QM_SS_TIMER_CONTROL,   /**< Timer control register. */
	QM_SS_TIMER_LIMIT      /**< Timer limit register. */
} qm_ss_timer_reg_t;
/**
* Sensor Subsystem Timers.
*/
typedef enum { QM_SS_TIMER_0 = 0, QM_SS_TIMER_NUM } qm_ss_timer_t;
#define QM_SS_TIMER_0_BASE (0x21)
#define QM_SS_TIMER_1_BASE (0x100)
#define QM_SS_TSC_BASE QM_SS_TIMER_1_BASE
#define QM_SS_TIMER_CONTROL_INT_EN_OFFSET (0)
#define QM_SS_TIMER_CONTROL_NON_HALTED_OFFSET (1)
#define QM_SS_TIMER_CONTROL_WATCHDOG_OFFSET (2)
#define QM_SS_TIMER_CONTROL_INT_PENDING_OFFSET (3)
/** @} */
/**
* GPIO registers and definitions.
*
* @name SS GPIO
* @{
*/
/* Sensor Subsystem GPIO register offsets (gap at offset 2 is skipped). */
typedef enum {
	QM_SS_GPIO_SWPORTA_DR = 0, /**< Port A data register. */
	QM_SS_GPIO_SWPORTA_DDR,	   /**< Port A data direction register. */
	QM_SS_GPIO_INTEN = 3,	   /**< Interrupt enable register. */
	QM_SS_GPIO_INTMASK,	   /**< Interrupt mask register. */
	QM_SS_GPIO_INTTYPE_LEVEL,  /**< Interrupt type (level/edge). */
	QM_SS_GPIO_INT_POLARITY,   /**< Interrupt polarity. */
	QM_SS_GPIO_INTSTATUS,	   /**< Interrupt status register. */
	QM_SS_GPIO_DEBOUNCE,	   /**< Debounce enable register. */
	QM_SS_GPIO_PORTA_EOI,	   /**< Port A end-of-interrupt (clear). */
	QM_SS_GPIO_EXT_PORTA,	   /**< External port A (pin levels). */
	QM_SS_GPIO_LS_SYNC	   /**< Level-sensitive sync register. */
} qm_ss_gpio_reg_t;
/* Sensor Subsystem GPIO register block type */
#define QM_SS_GPIO_NUM_PINS (16)
#define QM_SS_GPIO_LS_SYNC_CLK_EN BIT(31)
#define QM_SS_GPIO_LS_SYNC_SYNC_LVL BIT(0)
/** Sensor Subsystem GPIO */
typedef enum { QM_SS_GPIO_0 = 0, QM_SS_GPIO_1, QM_SS_GPIO_NUM } qm_ss_gpio_t;
#define QM_SS_GPIO_0_BASE (0x80017800)
#define QM_SS_GPIO_1_BASE (0x80017900)
/** @} */
/**
* I2C registers and definitions.
*
* @name SS I2C
* @{
*/
/** Sensor Subsystem I2C register offsets (gaps are reserved). */
typedef enum {
	QM_SS_I2C_CON = 0,	  /**< Control register. */
	QM_SS_I2C_DATA_CMD,	  /**< Data / command register. */
	QM_SS_I2C_SS_SCL_CNT,	  /**< Standard-speed SCL count. */
	QM_SS_I2C_FS_SCL_CNT = 0x04, /**< Fast-speed SCL count. */
	QM_SS_I2C_INTR_STAT = 0x06,  /**< Interrupt status. */
	QM_SS_I2C_INTR_MASK,	  /**< Interrupt mask. */
	QM_SS_I2C_TL,		  /**< TX/RX FIFO thresholds. */
	QM_SS_I2C_INTR_CLR = 0x0A,   /**< Interrupt clear. */
	QM_SS_I2C_STATUS,	  /**< Status register. */
	QM_SS_I2C_TXFLR,	  /**< TX FIFO level. */
	QM_SS_I2C_RXFLR,	  /**< RX FIFO level. */
	QM_SS_I2C_SDA_CONFIG,	  /**< SDA configuration. */
	QM_SS_I2C_TX_ABRT_SOURCE,  /**< TX abort source. */
	QM_SS_I2C_ENABLE_STATUS = 0x11 /**< Enable status. */
} qm_ss_i2c_reg_t;
/** Sensor Subsystem I2C register block type */
#define QM_SS_I2C_CON_ENABLE BIT(0)
#define QM_SS_I2C_CON_ABORT BIT(1)
#define QM_SS_I2C_CON_SPEED_SS BIT(3)
#define QM_SS_I2C_CON_SPEED_FS BIT(4)
#define QM_SS_I2C_CON_SPEED_MASK (0x18)
#define QM_SS_I2C_CON_IC_10BITADDR BIT(5)
#define QM_SS_I2C_CON_IC_10BITADDR_OFFSET (5)
#define QM_SS_I2C_CON_IC_10BITADDR_MASK (5)
#define QM_SS_I2C_CON_RESTART_EN BIT(7)
#define QM_SS_I2C_CON_TAR_SAR_OFFSET (9)
#define QM_SS_I2C_CON_TAR_SAR_MASK (0x7FE00)
#define QM_SS_I2C_CON_TAR_SAR_10_BIT_MASK (0xFF)
#define QM_SS_I2C_CON_SPKLEN_OFFSET (22)
#define QM_SS_I2C_CON_SPKLEN_MASK (0x3FC00000)
#define QM_SS_I2C_CON_CLK_ENA BIT(31)
#define QM_SS_I2C_DATA_CMD_CMD BIT(8)
#define QM_SS_I2C_DATA_CMD_STOP BIT(9)
#define QM_SS_I2C_DATA_CMD_PUSH (0xC0000000)
#define QM_SS_I2C_DATA_CMD_POP (0x80000000)
#define QM_SS_I2C_SS_FS_SCL_CNT_HCNT_OFFSET (16)
#define QM_SS_I2C_SS_FS_SCL_CNT_16BIT_MASK (0xFFFF)
#define QM_SS_I2C_INTR_STAT_RX_UNDER BIT(0)
#define QM_SS_I2C_INTR_STAT_RX_OVER BIT(1)
#define QM_SS_I2C_INTR_STAT_RX_FULL BIT(2)
#define QM_SS_I2C_INTR_STAT_TX_OVER BIT(3)
#define QM_SS_I2C_INTR_STAT_TX_EMPTY BIT(4)
#define QM_SS_I2C_INTR_STAT_TX_ABRT BIT(6)
#define QM_SS_I2C_INTR_MASK_ALL (0x0)
#define QM_SS_I2C_INTR_MASK_RX_UNDER BIT(0)
#define QM_SS_I2C_INTR_MASK_RX_OVER BIT(1)
#define QM_SS_I2C_INTR_MASK_RX_FULL BIT(2)
#define QM_SS_I2C_INTR_MASK_TX_OVER BIT(3)
#define QM_SS_I2C_INTR_MASK_TX_EMPTY BIT(4)
#define QM_SS_I2C_INTR_MASK_TX_ABRT BIT(6)
#define QM_SS_I2C_TL_TX_TL_OFFSET (16)
#define QM_SS_I2C_TL_RX_TL_MASK (0xFF)
#define QM_SS_I2C_TL_TX_TL_MASK (0xFF0000)
#define QM_SS_I2C_INTR_CLR_TX_ABRT BIT(6)
#define QM_SS_I2C_TX_ABRT_SOURCE_NAK_MASK (0x09)
#define QM_SS_I2C_TX_ABRT_SOURCE_ALL_MASK (0x1FFFF)
#define QM_SS_I2C_TX_ABRT_SBYTE_NORSTRT BIT(9)
#define QM_SS_I2C_TX_ABRT_SOURCE_ART_LOST BIT(12)
#define QM_SS_I2C_ENABLE_CONTROLLER_EN BIT(0)
#define QM_SS_I2C_ENABLE_STATUS_IC_EN BIT(0)
#define QM_SS_I2C_STATUS_BUSY_MASK (0x21)
#define QM_SS_I2C_STATUS_RFNE BIT(3)
#define QM_SS_I2C_STATUS_TFE BIT(2)
#define QM_SS_I2C_STATUS_TFNF BIT(1)
#define QM_SS_I2C_IC_LCNT_MAX (65525)
#define QM_SS_I2C_IC_LCNT_MIN (8)
#define QM_SS_I2C_IC_HCNT_MAX (65525)
#define QM_SS_I2C_IC_HCNT_MIN (6)
#define QM_SS_I2C_FIFO_SIZE (8)
/** Sensor Subsystem I2C */
typedef enum { QM_SS_I2C_0 = 0, QM_SS_I2C_1, QM_SS_I2C_NUM } qm_ss_i2c_t;
#define QM_SS_I2C_0_BASE (0x80012000)
#define QM_SS_I2C_1_BASE (0x80012100)
/** Sensor Subsystem ADC @{*/
/** Sensor Subsystem ADC register offsets. */
typedef enum {
	QM_SS_ADC_SET = 0, /**< ADC and sequencer settings register. */
	QM_SS_ADC_DIVSEQSTAT, /**< ADC clock and sequencer status register. */
	QM_SS_ADC_SEQ,	   /**< ADC sequence entry register. */
	QM_SS_ADC_CTRL,	   /**< ADC control register. */
	QM_SS_ADC_INTSTAT, /**< ADC interrupt status register. */
	QM_SS_ADC_SAMPLE   /**< ADC sample register. */
} qm_ss_adc_reg_t;
/** Sensor Subsystem ADC instances (a single module on this SoC). */
typedef enum {
	QM_SS_ADC_0 = 0, /**< ADC first module. */
	QM_SS_ADC_NUM    /**< Number of ADC modules. */
} qm_ss_adc_t;
/** SS ADC register base */
#define QM_SS_ADC_BASE (0x80015000)
/** For 1MHz, the max divisor is 7. */
#define QM_SS_ADC_DIV_MAX (7)
#define QM_SS_ADC_FIFO_LEN (32)
#define QM_SS_ADC_SET_POP_RX BIT(31)
#define QM_SS_ADC_SET_FLUSH_RX BIT(30)
#define QM_SS_ADC_SET_THRESHOLD_MASK (0x3F000000)
#define QM_SS_ADC_SET_THRESHOLD_OFFSET (24)
#define QM_SS_ADC_SET_SEQ_ENTRIES_MASK (0x3F0000)
#define QM_SS_ADC_SET_SEQ_ENTRIES_OFFSET (16)
#define QM_SS_ADC_SET_SEQ_MODE BIT(13)
#define QM_SS_ADC_SET_SAMPLE_WIDTH_MASK (0x1F)
#define QM_SS_ADC_DIVSEQSTAT_CLK_RATIO_MASK (0x1FFFFF)
#define QM_SS_ADC_CTRL_CLR_SEQERROR BIT(19)
#define QM_SS_ADC_CTRL_CLR_UNDERFLOW BIT(18)
#define QM_SS_ADC_CTRL_CLR_OVERFLOW BIT(17)
#define QM_SS_ADC_CTRL_CLR_DATA_A BIT(16)
#define QM_SS_ADC_CTRL_MSK_SEQERROR BIT(11)
#define QM_SS_ADC_CTRL_MSK_UNDERFLOW BIT(10)
#define QM_SS_ADC_CTRL_MSK_OVERFLOW BIT(9)
#define QM_SS_ADC_CTRL_MSK_DATA_A BIT(8)
#define QM_SS_ADC_CTRL_SEQ_TABLE_RST BIT(6)
#define QM_SS_ADC_CTRL_SEQ_PTR_RST BIT(5)
#define QM_SS_ADC_CTRL_SEQ_START BIT(4)
#define QM_SS_ADC_CTRL_CLK_ENA BIT(2)
#define QM_SS_ADC_CTRL_ADC_ENA BIT(1)
#define QM_SS_ADC_CTRL_MSK_ALL_INT (0xF00)
#define QM_SS_ADC_CTRL_CLR_ALL_INT (0xF0000)
#define QM_SS_ADC_SEQ_DELAYODD_OFFSET (21)
#define QM_SS_ADC_SEQ_MUXODD_OFFSET (16)
#define QM_SS_ADC_SEQ_DELAYEVEN_OFFSET (5)
#define QM_SS_ADC_SEQ_DUMMY (0x480)
#define QM_SS_ADC_INTSTAT_SEQERROR BIT(3)
#define QM_SS_ADC_INTSTAT_UNDERFLOW BIT(2)
#define QM_SS_ADC_INTSTAT_OVERFLOW BIT(1)
#define QM_SS_ADC_INTSTAT_DATA_A BIT(0)
/** End of Sensor Subsystem ADC @}*/
/**
* CREG Registers.
*
* @name SS CREG
* @{
*/
/* Sensor Subsystem CREG (control register block) offsets. */
typedef enum {
	QM_SS_IO_CREG_MST0_CTRL = 0x0,	/**< Master control register. */
	QM_SS_IO_CREG_SLV0_OBSR = 0x80,	/**< Slave control register. */
	QM_SS_IO_CREG_SLV1_OBSR = 0x180 /**< Slave control register. */
} qm_ss_creg_reg_t;
#define QM_SS_ADC_CAL_MAX (0x7F)
/* MST0_CTRL fields */
#define QM_SS_ADC_PWR_MODE_OFFSET (1)
#define QM_SS_ADC_PWR_MODE_MASK (0x7)
#define QM_SS_ADC_DELAY_OFFSET (3)
#define QM_SS_ADC_DELAY_MASK (0xFFF8)
#define QM_SS_ADC_CAL_REQ BIT(16)
#define QM_SS_ADC_CAL_CMD_OFFSET (17)
#define QM_SS_ADC_CAL_CMD_MASK (0xE0000)
#define QM_SS_ADC_CAL_VAL_SET_OFFSET (20)
#define QM_SS_ADC_CAL_VAL_SET_MASK (0x7F00000)
/* SLV0_OBSR fields */
#define QM_SS_ADC_CAL_VAL_GET_OFFSET (5)
#define QM_SS_ADC_CAL_VAL_GET_MASK (0xFE0)
#define QM_SS_ADC_CAL_ACK BIT(4)
#define QM_SS_ADC_PWR_MODE_STS BIT(3) /*FIXME doesnt match doc */
#define SS_CLK_PERIPH_ALL_IN_CREG \
(SS_CLK_PERIPH_ADC | SS_CLK_PERIPH_I2C_1 | SS_CLK_PERIPH_I2C_0 | \
SS_CLK_PERIPH_SPI_1 | SS_CLK_PERIPH_SPI_0)
/** SS CREG base */
#define QM_SS_CREG_BASE (0x80018000)
/** @} */
/**
* IRQs and interrupt vectors.
*
* @name SS Interrupt
* @{
*/
#define QM_SS_EXCEPTION_NUM (16) /**< Exceptions and traps in ARC EM core */
#define QM_SS_INT_TIMER_NUM (2) /**< Internal interrupts in ARC EM core */
#define QM_SS_IRQ_SENSOR_NUM (18) /**< IRQ's from the Sensor Subsystem */
#define QM_SS_IRQ_COMMON_NUM (32) /**< IRQ's from the common SoC fabric */
#define QM_SS_INT_VECTOR_NUM \
(QM_SS_EXCEPTION_NUM + QM_SS_INT_TIMER_NUM + QM_SS_IRQ_SENSOR_NUM + \
QM_SS_IRQ_COMMON_NUM)
#define QM_SS_IRQ_NUM (QM_SS_IRQ_SENSOR_NUM + QM_SS_IRQ_COMMON_NUM)
/*
* The following definitions are Sensor Subsystem interrupt irq and vector
* numbers:
* #define QM_SS_xxx - irq number
* #define QM_SS_xxx_VECTOR - vector number
*/
#define QM_SS_INT_TIMER_0 (16)
#define QM_SS_INT_TIMER_1 (17)
#define QM_SS_IRQ_ADC_ERR (0)
#define QM_SS_IRQ_ADC_ERR_VECTOR (18)
#define QM_SS_IRQ_ADC_IRQ (1)
#define QM_SS_IRQ_ADC_IRQ_VECTOR (19)
#define QM_SS_IRQ_GPIO_INTR_0 (2)
#define QM_SS_IRQ_GPIO_INTR_0_VECTOR (20)
#define QM_SS_IRQ_GPIO_INTR_1 (3)
#define QM_SS_IRQ_GPIO_INTR_1_VECTOR (21)
#define QM_SS_IRQ_I2C_0_ERR (4)
#define QM_SS_IRQ_I2C_0_ERR_VECTOR (22)
#define QM_SS_IRQ_I2C_0_RX_AVAIL (5)
#define QM_SS_IRQ_I2C_0_RX_AVAIL_VECTOR (23)
#define QM_SS_IRQ_I2C_0_TX_REQ (6)
#define QM_SS_IRQ_I2C_0_TX_REQ_VECTOR (24)
#define QM_SS_IRQ_I2C_0_STOP_DET (7)
#define QM_SS_IRQ_I2C_0_STOP_DET_VECTOR (25)
#define QM_SS_IRQ_I2C_1_ERR (8)
#define QM_SS_IRQ_I2C_1_ERR_VECTOR (26)
#define QM_SS_IRQ_I2C_1_RX_AVAIL (9)
#define QM_SS_IRQ_I2C_1_RX_AVAIL_VECTOR (27)
#define QM_SS_IRQ_I2C_1_TX_REQ (10)
#define QM_SS_IRQ_I2C_1_TX_REQ_VECTOR (28)
#define QM_SS_IRQ_I2C_1_STOP_DET (11)
#define QM_SS_IRQ_I2C_1_STOP_DET_VECTOR (29)
#define QM_SS_IRQ_SPI_0_ERR_INT (12)
#define QM_SS_IRQ_SPI_0_ERR_INT_VECTOR (30)
#define QM_SS_IRQ_SPI_0_RX_AVAIL (13)
#define QM_SS_IRQ_SPI_0_RX_AVAIL_VECTOR (31)
#define QM_SS_IRQ_SPI_0_TX_REQ (14)
#define QM_SS_IRQ_SPI_0_TX_REQ_VECTOR (32)
#define QM_SS_IRQ_SPI_1_ERR_INT (15)
#define QM_SS_IRQ_SPI_1_ERR_INT_VECTOR (33)
#define QM_SS_IRQ_SPI_1_RX_AVAIL (16)
#define QM_SS_IRQ_SPI_1_RX_AVAIL_VECTOR (34)
#define QM_SS_IRQ_SPI_1_TX_REQ (17)
#define QM_SS_IRQ_SPI_1_TX_REQ_VECTOR (35)
/**
 * Sensor Subsystem interrupt priority levels.
 *
 * Only levels 0, 1 and 15 are named; QM_SS_INT_PRIORITY_NUM
 * auto-increments to 16 (one past the highest level).
 */
typedef enum {
	QM_SS_INT_PRIORITY_0 = 0,   /**< Priority level 0. */
	QM_SS_INT_PRIORITY_1 = 1,   /**< Priority level 1. */
	QM_SS_INT_PRIORITY_15 = 15, /**< Priority level 15. */
	QM_SS_INT_PRIORITY_NUM      /**< Number of priority levels (16). */
} qm_ss_irq_priority_t;
/** Interrupt mask state: disabled (masked) or enabled (unmasked). */
typedef enum { QM_SS_INT_DISABLE = 0, QM_SS_INT_ENABLE = 1 } qm_ss_irq_mask_t;
/** Interrupt trigger type. */
typedef enum {
	QM_SS_IRQ_LEVEL_SENSITIVE = 0, /**< Triggered while the line is asserted. */
	QM_SS_IRQ_EDGE_SENSITIVE = 1   /**< Triggered on a signal transition. */
} qm_ss_irq_trigger_t;
#define QM_SS_AUX_IRQ_CTRL (0xE)
#define QM_SS_AUX_IRQ_HINT (0x201)
#define QM_SS_AUX_IRQ_PRIORITY (0x206)
#define QM_SS_AUX_IRQ_STATUS (0x406)
#define QM_SS_AUX_IRQ_SELECT (0x40B)
#define QM_SS_AUX_IRQ_ENABLE (0x40C)
#define QM_SS_AUX_IRQ_TRIGER (0x40D)
/** @} */
/**
 * SPI registers and definitions.
 *
 * @name SS SPI
 * @{
 */
/**
 * Sensor Subsystem SPI register map.
 *
 * Enumerator values are register offsets relative to the SPI controller
 * base (QM_SS_SPI_0_BASE / QM_SS_SPI_1_BASE). Some offsets are assigned
 * explicitly (CTRL = 0, SPIEN = 2, TIMING = 4, TXFLR = 7); the skipped
 * offsets are unused/reserved. The offset unit is not shown here --
 * presumably 32-bit word offsets; confirm against the datasheet.
 */
typedef enum {
	QM_SS_SPI_CTRL = 0,   /**< SPI control register. */
	QM_SS_SPI_SPIEN = 2,  /**< SPI enable register. */
	QM_SS_SPI_TIMING = 4, /**< SPI serial clock divider value. */
	QM_SS_SPI_FTLR,       /**< Threshold value for TX/RX FIFO (offset 5). */
	QM_SS_SPI_TXFLR = 7,  /**< Number of valid data entries in TX FIFO. */
	QM_SS_SPI_RXFLR,      /**< Number of valid data entries in RX FIFO. */
	QM_SS_SPI_SR,         /**< SPI status register. */
	QM_SS_SPI_INTR_STAT,  /**< Interrupt status register. */
	QM_SS_SPI_INTR_MASK,  /**< Interrupt mask register. */
	QM_SS_SPI_CLR_INTR,   /**< Interrupt clear register. */
	QM_SS_SPI_DR,         /**< RW buffer for FIFOs. */
} qm_ss_spi_reg_t;
/** Sensor Subsystem SPI module instances. */
typedef enum {
	QM_SS_SPI_0 = 0, /**< SPI module 0 (base QM_SS_SPI_0_BASE). */
	QM_SS_SPI_1,     /**< SPI module 1 (base QM_SS_SPI_1_BASE). */
	QM_SS_SPI_NUM    /**< Number of SS SPI modules (2). */
} qm_ss_spi_t;
#define QM_SS_SPI_0_BASE (0x80010000)
#define QM_SS_SPI_1_BASE (0x80010100)
#define QM_SS_SPI_CTRL_DFS_OFFS (0)
#define QM_SS_SPI_CTRL_DFS_MASK (0x0000000F)
#define QM_SS_SPI_CTRL_BMOD_OFFS (6)
#define QM_SS_SPI_CTRL_BMOD_MASK (0x000000C0)
#define QM_SS_SPI_CTRL_SCPH BIT(6)
#define QM_SS_SPI_CTRL_SCPOL BIT(7)
#define QM_SS_SPI_CTRL_TMOD_OFFS (8)
#define QM_SS_SPI_CTRL_TMOD_MASK (0x00000300)
#define QM_SS_SPI_CTRL_SRL BIT(11)
#define QM_SS_SPI_CTRL_CLK_ENA BIT(15)
#define QM_SS_SPI_CTRL_NDF_OFFS (16)
#define QM_SS_SPI_CTRL_NDF_MASK (0xFFFF0000)
#define QM_SS_SPI_SPIEN_EN BIT(0)
#define QM_SS_SPI_SPIEN_SER_OFFS (4)
#define QM_SS_SPI_SPIEN_SER_MASK (0x000000F0)
#define QM_SS_SPI_TIMING_SCKDV_OFFS (0)
#define QM_SS_SPI_TIMING_SCKDV_MASK (0x0000FFFF)
#define QM_SS_SPI_TIMING_RSD_OFFS (16)
#define QM_SS_SPI_TIMING_RSD_MASK (0x00FF0000)
#define QM_SS_SPI_FTLR_RFT_OFFS (0)
#define QM_SS_SPI_FTLR_RFT_MASK (0x0000FFFF)
#define QM_SS_SPI_FTLR_TFT_OFFS (16)
#define QM_SS_SPI_FTLR_TFT_MASK (0xFFFF0000)
#define QM_SS_SPI_SR_BUSY BIT(0)
#define QM_SS_SPI_SR_TFNF BIT(1)
#define QM_SS_SPI_SR_TFE BIT(2)
#define QM_SS_SPI_SR_RFNE BIT(3)
#define QM_SS_SPI_SR_RFF BIT(4)
#define QM_SS_SPI_INTR_TXEI BIT(0)
#define QM_SS_SPI_INTR_TXOI BIT(1)
#define QM_SS_SPI_INTR_RXUI BIT(2)
#define QM_SS_SPI_INTR_RXOI BIT(3)
#define QM_SS_SPI_INTR_RXFI BIT(4)
#define QM_SS_SPI_INTR_ALL (0x0000001F)
#define QM_SS_SPI_INTR_STAT_TXEI QM_SS_SPI_INTR_TXEI
#define QM_SS_SPI_INTR_STAT_TXOI QM_SS_SPI_INTR_TXOI
#define QM_SS_SPI_INTR_STAT_RXUI QM_SS_SPI_INTR_RXUI
#define QM_SS_SPI_INTR_STAT_RXOI QM_SS_SPI_INTR_RXOI
#define QM_SS_SPI_INTR_STAT_RXFI QM_SS_SPI_INTR_RXFI
#define QM_SS_SPI_INTR_MASK_TXEI QM_SS_SPI_INTR_TXEI
#define QM_SS_SPI_INTR_MASK_TXOI QM_SS_SPI_INTR_TXOI
#define QM_SS_SPI_INTR_MASK_RXUI QM_SS_SPI_INTR_RXUI
#define QM_SS_SPI_INTR_MASK_RXOI QM_SS_SPI_INTR_RXOI
#define QM_SS_SPI_INTR_MASK_RXFI QM_SS_SPI_INTR_RXFI
#define QM_SS_SPI_CLR_INTR_TXEI QM_SS_SPI_INTR_TXEI
#define QM_SS_SPI_CLR_INTR_TXOI QM_SS_SPI_INTR_TXOI
#define QM_SS_SPI_CLR_INTR_RXUI QM_SS_SPI_INTR_RXUI
#define QM_SS_SPI_CLR_INTR_RXOI QM_SS_SPI_INTR_RXOI
#define QM_SS_SPI_CLR_INTR_RXFI QM_SS_SPI_INTR_RXFI
#define QM_SS_SPI_DR_DR_OFFS (0)
#define QM_SS_SPI_DR_DR_MASK (0x0000FFFF)
#define QM_SS_SPI_DR_WR BIT(30)
#define QM_SS_SPI_DR_STROBE BIT(31)
#define QM_SS_SPI_DR_W_MASK (0xc0000000)
#define QM_SS_SPI_DR_R_MASK (0x80000000)
/** @} */
#endif /* __SENSOR_REGISTERS_H__ */

View file

@ -1,10 +1,10 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
@ -13,7 +13,7 @@
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@ -42,7 +42,11 @@
#define QUARK_SE (1)
#define HAS_4_TIMERS (1)
#define HAS_AON_GPIO (1)
#define HAS_MAILBOX (1)
#if !defined(QM_SENSOR)
#define HAS_APIC (1)
#endif
/**
* @defgroup groupSEREG Quark SE Registers
@ -66,7 +70,7 @@ typedef struct {
ccu_gpio_db_clk_ctl; /**< Peripheral Clock Divider Control 1 */
QM_RW uint32_t
ccu_ext_clock_ctl; /**< External Clock Control Register */
QM_RW uint32_t ccu_ss_periph_clk_gate_ctl; /**< Sensor subsustem
QM_RW uint32_t ccu_ss_periph_clk_gate_ctl; /**< Sensor Subsystem
peripheral clock gate
control */
QM_RW uint32_t ccu_lp_clk_ctl; /**< System Low Power Clock Control */
@ -90,6 +94,12 @@ qm_scss_ccu_reg_t test_scss_ccu;
#define QM_OSC0_LOCK_SI BIT(0)
#define QM_OSC0_LOCK_XTAL BIT(1)
#define QM_OSC0_EN_SI_OSC BIT(1)
#define QM_OSC0_PD BIT(2)
#define QM_OSC1_PD BIT(1)
#define QM_SI_OSC_1V2_MODE BIT(0)
#define QM_SCSS_CCU_SYS_CLK_SEL BIT(0)
#define QM_SCSS_CCU_C2_LP_EN (1)
#define QM_SCSS_CCU_SS_LPS_EN (0)
#define QM_OSC0_SI_FREQ_SEL_DEF_MASK (0xFFFFFCFF)
@ -156,6 +166,8 @@ qm_scss_cmp_reg_t test_scss_cmp;
#define QM_SCSS_CMP ((qm_scss_cmp_reg_t *)QM_SCSS_CMP_BASE)
#endif
#define QM_AC_HP_COMPARATORS_MASK (0x7FFC0)
typedef struct {
QM_RW uint32_t reg;
QM_RW uint32_t pad[3];
@ -210,7 +222,7 @@ qm_lapic_reg_t test_lapic;
* into IOAPIC. To trigger this manually we must write the vector number being
* serviced into the IOAPIC EOI register.
*/
#if !defined(USE_ISR_EOI)
#if defined(ISR_HANDLED) || defined(QM_SENSOR)
#define QM_ISR_EOI(vector)
#else
#define QM_ISR_EOI(vector) \
@ -320,8 +332,12 @@ qm_scss_int_reg_t test_scss_int;
#define QM_INT_SRAM_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_SRAM_CONTROLLER_HOST_MASK BIT(0)
#define QM_INT_SRAM_CONTROLLER_SS_HALT_MASK BIT(24)
#define QM_INT_SRAM_CONTROLLER_SS_MASK BIT(8)
#define QM_INT_FLASH_CONTROLLER_HOST_HALT_MASK BIT(16)
#define QM_INT_FLASH_CONTROLLER_HOST_MASK BIT(0)
#define QM_INT_FLASH_CONTROLLER_SS_HALT_MASK BIT(24)
#define QM_INT_FLASH_CONTROLLER_SS_MASK BIT(8)
typedef struct {
QM_RW uint32_t p_lvl2; /**< Processor level 2 */
@ -358,11 +374,33 @@ qm_scss_pmu_reg_t test_scss_pmu;
#define QM_SCSS_PMU ((qm_scss_pmu_reg_t *)QM_SCSS_PMU_BASE)
#endif
#define QM_SS_CFG_ARC_RUN_REQ_A BIT(24)
#define QM_P_STS_HALT_INTERRUPT_REDIRECTION BIT(26)
#define QM_P_STS_ARC_HALT BIT(14)
#define QM_SCSS_SLP_CFG_LPMODE_EN BIT(8)
#define QM_SCSS_SLP_CFG_RTC_DIS BIT(7)
#define QM_SCSS_PM1C_SLPEN BIT(13)
#define QM_SCSS_HOST_VR_EN BIT(7)
#define QM_SCSS_PLAT3P3_VR_EN BIT(7)
#define QM_SCSS_PLAT1P8_VR_EN BIT(7)
#define QM_SCSS_HOST_VR_VREG_SEL BIT(6)
#define QM_SCSS_PLAT3P3_VR_VREG_SEL BIT(6)
#define QM_SCSS_PLAT1P8_VR_VREG_SEL BIT(6)
#define QM_SCSS_VR_ROK BIT(10)
#define QM_SCSS_VR_EN BIT(7)
#define QM_SCSS_VR_VREG_SEL BIT(6)
#define QM_AON_VR_VSEL_MASK (0xFFE0)
#define QM_AON_VR_VSEL_1V2 (0x8)
#define QM_AON_VR_VSEL_1V35 (0xB)
#define QM_AON_VR_VSEL_1V8 (0x10)
#define QM_AON_VR_EN BIT(7)
#define QM_AON_VR_VSTRB BIT(5)
typedef struct {
QM_RW uint32_t ss_cfg; /**< Sensor Subsystem Configuration */
QM_RW uint32_t ss_sts; /**< Sensor subsystem status */
QM_RW uint32_t ss_sts; /**< Sensor Subsystem status */
} qm_scss_ss_reg_t;
#if (UNIT_TEST)
@ -440,6 +478,11 @@ qm_scss_pmux_reg_t test_scss_pmux;
#define QM_SCSS_PMUX ((qm_scss_pmux_reg_t *)QM_SCSS_PMUX_BASE)
#endif
/**
* Mailbox
*/
#define QM_MBOX_TRIGGER_CH_INT BIT(31)
typedef struct {
QM_RW uint32_t ch_ctrl; /**< Channel Control Word */
QM_RW uint32_t ch_data[4]; /**< Channel Payload Data Word 0 */
@ -593,14 +636,17 @@ typedef struct {
} qm_uart_reg_t;
#if (UNIT_TEST)
qm_uart_reg_t test_uart[QM_UART_NUM];
#define QM_UART ((qm_uart_reg_t *)(&test_uart))
qm_uart_reg_t test_uart_instance;
qm_uart_reg_t *test_uart[QM_UART_NUM];
#define QM_UART test_uart
#else
/** UART register base address */
#define QM_UART_BASE (0xB0002000)
#define QM_UART_0_BASE (0xB0002000)
#define QM_UART_1_BASE (0xB0002400)
/** UART register block */
#define QM_UART ((qm_uart_reg_t *)QM_UART_BASE)
extern qm_uart_reg_t *qm_uart[QM_UART_NUM];
#define QM_UART qm_uart
#endif
/**
@ -693,6 +739,50 @@ extern qm_spi_reg_t *qm_spi_controllers[QM_SPI_NUM];
#define QM_SPI_SLV_BASE (0xB0001800)
#endif
/* SPI Ctrlr0 register */
#define QM_SPI_CTRLR0_DFS_32_MASK (0x001F0000)
#define QM_SPI_CTRLR0_TMOD_MASK (0x00000300)
#define QM_SPI_CTRLR0_SCPOL_SCPH_MASK (0x000000C0)
#define QM_SPI_CTRLR0_FRF_MASK (0x00000030)
#define QM_SPI_CTRLR0_DFS_32_OFFSET (16)
#define QM_SPI_CTRLR0_TMOD_OFFSET (8)
#define QM_SPI_CTRLR0_SCPOL_SCPH_OFFSET (6)
#define QM_SPI_CTRLR0_FRF_OFFSET (4)
/* SPI SSI Enable register */
#define QM_SPI_SSIENR_SSIENR BIT(0)
/* SPI Status register */
#define QM_SPI_SR_BUSY BIT(0)
#define QM_SPI_SR_TFNF BIT(1)
#define QM_SPI_SR_TFE BIT(2)
/* SPI Interrupt Mask register */
#define QM_SPI_IMR_MASK_ALL (0x00)
#define QM_SPI_IMR_TXEIM BIT(0)
#define QM_SPI_IMR_TXOIM BIT(1)
#define QM_SPI_IMR_RXUIM BIT(2)
#define QM_SPI_IMR_RXOIM BIT(3)
#define QM_SPI_IMR_RXFIM BIT(4)
/* SPI Interrupt Status register */
#define QM_SPI_ISR_TXEIS BIT(0)
#define QM_SPI_ISR_TXOIS BIT(1)
#define QM_SPI_ISR_RXUIS BIT(2)
#define QM_SPI_ISR_RXOIS BIT(3)
#define QM_SPI_ISR_RXFIS BIT(4)
/* SPI Raw Interrupt Status register */
#define QM_SPI_RISR_TXEIR BIT(0)
#define QM_SPI_RISR_TXOIR BIT(1)
#define QM_SPI_RISR_RXUIR BIT(2)
#define QM_SPI_RISR_RXOIR BIT(3)
#define QM_SPI_RISR_RXFIR BIT(4)
/* SPI DMA control */
#define QM_SPI_DMACR_RDMAE BIT(0)
#define QM_SPI_DMACR_TDMAE BIT(1)
/**
* Number of I2C controllers.
*/
@ -742,7 +832,7 @@ typedef struct {
QM_RW uint32_t ic_sda_hold; /**< SDA Hold */
QM_RW uint32_t ic_tx_abrt_source; /**< Transmit Abort Source */
QM_RW uint32_t reserved;
QM_RW uint32_t ic_dma_cr; /**< SDA Setup */
QM_RW uint32_t ic_dma_cr; /**< SDA Setup */
QM_RW uint32_t ic_dma_tdlr; /**< DMA Transmit Data Level Register */
QM_RW uint32_t ic_dma_rdlr; /**< I2C Receive Data Level Register */
QM_RW uint32_t ic_sda_setup; /**< SDA Setup */
@ -758,16 +848,19 @@ typedef struct {
} qm_i2c_reg_t;
#if (UNIT_TEST)
qm_i2c_reg_t test_i2c[QM_I2C_NUM];
qm_i2c_reg_t test_i2c_instance[QM_I2C_NUM];
qm_i2c_reg_t *test_i2c[QM_I2C_NUM];
#define QM_I2C ((qm_i2c_reg_t *)(&test_i2c))
#define QM_I2C test_i2c
#else
/** I2C Master register base address */
#define QM_I2C_BASE (0xB0002800)
#define QM_I2C_0_BASE (0xB0002800)
#define QM_I2C_1_BASE (0xB0002C00)
/** I2C register block */
#define QM_I2C ((qm_i2c_reg_t *)QM_I2C_BASE)
extern qm_i2c_reg_t *qm_i2c[QM_I2C_NUM];
#define QM_I2C qm_i2c
#endif
#define QM_I2C_IC_ENABLE_CONTROLLER_EN BIT(0)
@ -786,6 +879,7 @@ qm_i2c_reg_t test_i2c[QM_I2C_NUM];
#define QM_I2C_IC_CON_RESTART_EN BIT(5)
#define QM_I2C_IC_DATA_CMD_READ BIT(8)
#define QM_I2C_IC_DATA_CMD_STOP_BIT_CTRL BIT(9)
#define QM_I2C_IC_DATA_CMD_LSB_MASK (0x000000FF)
#define QM_I2C_IC_RAW_INTR_STAT_RX_FULL BIT(2)
#define QM_I2C_IC_RAW_INTR_STAT_TX_ABRT BIT(6)
#define QM_I2C_IC_TX_ABRT_SOURCE_NAK_MASK (0x1F)
@ -818,6 +912,10 @@ qm_i2c_reg_t test_i2c[QM_I2C_NUM];
#define QM_I2C_FIFO_SIZE (16)
/* I2C DMA */
#define QM_I2C_IC_DMA_CR_RX_ENABLE BIT(0)
#define QM_I2C_IC_DMA_CR_TX_ENABLE BIT(1)
/**
* Number of GPIO controllers.
*/
@ -892,37 +990,42 @@ typedef struct {
QM_RW uint32_t padding[0x3FFF2]; /* (0x100000 - 0x38) / 4 */
} qm_flash_reg_t;
#define QM_FLASH_PAGE_MASK (0x3F800)
#if (UNIT_TEST)
qm_flash_reg_t test_flash[QM_FLASH_NUM];
uint32_t test_flash_page[0x200];
qm_flash_reg_t test_flash_instance;
qm_flash_reg_t *test_flash[QM_FLASH_NUM];
uint8_t test_flash_page[0x800];
#define QM_FLASH_BASE ((uint32_t *)&test_flash)
#define QM_FLASH ((qm_flash_reg_t *)(&test_flash))
#define QM_FLASH test_flash
#define QM_FLASH_REGION_SYS_1_BASE (test_flash_page)
#define QM_FLASH_REGION_SYS_0_BASE (test_flash_page)
#define QM_FLASH_REGION_OTP_0_BASE (test_flash_page)
#define QM_FLASH_PAGE_MASK (0xCFF)
#define QM_FLASH_MAX_ADDR (0xFFFFFFFF)
#else
/** Flash controller 0 register block */
#define QM_FLASH_BASE (0xB0100000)
#define QM_FLASH ((qm_flash_reg_t *)QM_FLASH_BASE)
#define QM_FLASH_BASE_0 (0xB0100000)
#define QM_FLASH_BASE_1 (0xB0200000)
extern qm_flash_reg_t *qm_flash[QM_FLASH_NUM];
#define QM_FLASH qm_flash
/** Flash physical address mappings */
#define QM_FLASH_REGION_SYS_1_BASE (0x40030000)
#define QM_FLASH_REGION_SYS_0_BASE (0x40000000)
#define QM_FLASH_REGION_OTP_0_BASE (0xFFFFE000)
#define QM_FLASH_PAGE_MASK (0x3F800)
#define QM_FLASH_MAX_ADDR (0x30000)
#endif
#define QM_FLASH_REGION_DATA_BASE_OFFSET (0x00)
#define QM_FLASH_MAX_WAIT_STATES (0xF)
#define QM_FLASH_MAX_US_COUNT (0x3F)
#define QM_FLASH_MAX_ADDR (0x30000)
#define QM_FLASH_MAX_PAGE_NUM (QM_FLASH_MAX_ADDR / (4 * QM_FLASH_PAGE_SIZE))
#define QM_FLASH_MAX_PAGE_NUM \
(QM_FLASH_MAX_ADDR / (4 * QM_FLASH_PAGE_SIZE_DWORDS))
#define QM_FLASH_LVE_MODE BIT(5)
/**
* Memory Protection Region register block type.
@ -954,63 +1057,6 @@ qm_mpr_reg_t test_mpr;
#define QM_MPR_UP_BOUND_OFFSET (10)
#define QM_MPR_VSTS_VALID BIT(31)
/**
* DMA register block type.
*/
/**
* DMA Channel struct
*/
typedef struct {
QM_RW uint32_t sar; /**< Source address */
QM_RW uint32_t dar; /**< Destination address */
QM_RW uint32_t llp; /**< Linked list pointer */
QM_RW uint32_t ctrl_l; /**< Control, lower */
QM_RW uint32_t ctrl_u; /**< Control, upper */
QM_RW uint32_t sstat; /**< Source status */
QM_RW uint32_t dstat; /**< Destination status */
QM_RW uint32_t sstatar; /**< Source status address */
QM_RW uint32_t dstatar; /**< Destination status address */
QM_RW uint32_t cfg_l; /**< Configuration, lower */
QM_RW uint32_t cfg_u; /**< Configuration, upper */
QM_RW uint32_t sgr; /**< Source, gather */
QM_RW uint32_t dsr; /**< Destination, scatter */
} qm_dma_channel_t;
typedef struct {
} qm_dma_t;
#if (UNIT_TEST)
qm_dma_t test_dma;
#define QM_DMA ((qm_dma_t *)(&test_dma))
#else
#define QM_DMA_BASE (0xB0700000)
#define QM_DMA ((qm_dma_t *)QM_DMA_BASE)
#endif
/* DMA Channels */
typedef enum {
QM_DMA_UART_0_TX = 0,
QM_DMA_UART_0_RX,
QM_DMA_UART_1_TX,
QM_DMA_UART_1_RX,
QM_DMA_SPI_MST_0_TX,
QM_DMA_SPI_MST_0_RX,
QM_DMA_SPI_MST_1_TX,
QM_DMA_SPI_MST_1_RX,
QM_DMA_SPI_SLV_TX,
QM_DMA_SPI_SLV_RX,
QM_DMA_I2S_TX,
QM_DMA_I2S_RX,
QM_DMA_I2C_MST_0_TX,
QM_DMA_I2C_MST_0_RX,
QM_DMA_I2C_MST_1_TX,
QM_DMA_I2C_MST_1_RX
} qm_dma_channels_t;
#define QM_OSC0_PD BIT(2)
/** External Clock Control @{*/
@ -1021,6 +1067,7 @@ typedef enum {
/** GPIO Debounce Clock Control @{*/
#define QM_CCU_GPIO_DB_DIV_OFFSET (2)
#define QM_CCU_GPIO_DB_CLK_DIV_EN BIT(1)
#define QM_CCU_GPIO_DB_CLK_EN BIT(0)
/** End of GPIO Debounce Clock Control @}*/
/** Peripheral clock divider control 0 @{*/
@ -1033,10 +1080,46 @@ typedef enum {
#define QM_CCU_RTC_CLK_EN BIT(1)
#define QM_CCU_RTC_CLK_DIV_EN BIT(2)
#define QM_CCU_SYS_CLK_DIV_EN BIT(7)
#define QM_CCU_SYS_CLK_DIV_MASK (0x00000300)
#define QM_CCU_DMA_CLK_EN BIT(6)
#define QM_CCU_RTC_CLK_DIV_OFFSET (3)
#define QM_CCU_SYS_CLK_DIV_OFFSET (8)
/**
* Peripheral clock type.
*/
typedef enum {
CLK_PERIPH_REGISTER = BIT(0), /**< Peripheral Clock Gate Enable. */
CLK_PERIPH_CLK = BIT(1), /**< Peripheral Clock Enable. */
CLK_PERIPH_I2C_M0 = BIT(2), /**< I2C Master 0 Clock Enable. */
CLK_PERIPH_I2C_M1 = BIT(3), /**< I2C Master 1 Clock Enable. */
CLK_PERIPH_SPI_S = BIT(4), /**< SPI Slave Clock Enable. */
CLK_PERIPH_SPI_M0 = BIT(5), /**< SPI Master 0 Clock Enable. */
CLK_PERIPH_SPI_M1 = BIT(6), /**< SPI Master 1 Clock Enable. */
CLK_PERIPH_GPIO_INTERRUPT = BIT(7), /**< GPIO Interrupt Clock Enable. */
CLK_PERIPH_GPIO_DB = BIT(8), /**< GPIO Debounce Clock Enable. */
CLK_PERIPH_I2S = BIT(9), /**< I2S Clock Enable. */
CLK_PERIPH_WDT_REGISTER = BIT(10), /**< Watchdog Clock Enable. */
CLK_PERIPH_RTC_REGISTER = BIT(11), /**< RTC Clock Gate Enable. */
CLK_PERIPH_PWM_REGISTER = BIT(12), /**< PWM Clock Gate Enable. */
CLK_PERIPH_GPIO_REGISTER = BIT(13), /**< GPIO Clock Gate Enable. */
CLK_PERIPH_SPI_M0_REGISTER =
BIT(14), /**< SPI Master 0 Clock Gate Enable. */
CLK_PERIPH_SPI_M1_REGISTER =
BIT(15), /**< SPI Master 1 Clock Gate Enable. */
CLK_PERIPH_SPI_S_REGISTER =
BIT(16), /**< SPI Slave Clock Gate Enable. */
CLK_PERIPH_UARTA_REGISTER = BIT(17), /**< UARTA Clock Gate Enable. */
CLK_PERIPH_UARTB_REGISTER = BIT(18), /**< UARTB Clock Gate Enable. */
CLK_PERIPH_I2C_M0_REGISTER =
BIT(19), /**< I2C Master 0 Clock Gate Enable. */
CLK_PERIPH_I2C_M1_REGISTER =
BIT(20), /**< I2C Master 1 Clock Gate Enable. */
CLK_PERIPH_I2S_REGISTER = BIT(21), /**< I2S Clock Gate Enable. */
CLK_PERIPH_ALL = 0x3FFFFF /**< Quark SE peripherals Mask. */
} clk_periph_t;
/* Default mask values */
#define CLK_EXTERN_DIV_DEF_MASK (0xFFFFFFE3)
#define CLK_SYS_CLK_DIV_DEF_MASK (0xFFFFF87F)
@ -1049,6 +1132,212 @@ typedef enum {
/** End of System clock control 0 @}*/
/** DMA */
/**
 * DMA controller instances.
 */
typedef enum {
	QM_DMA_0,   /**< DMA controller id. */
	QM_DMA_NUM  /**< Number of DMA controllers (1). */
} qm_dma_t;
/**
 * DMA channel IDs.
 */
typedef enum {
	QM_DMA_CHANNEL_0 = 0, /**< DMA channel id for channel 0 */
	QM_DMA_CHANNEL_1,     /**< DMA channel id for channel 1 */
	QM_DMA_CHANNEL_2,     /**< DMA channel id for channel 2 */
	QM_DMA_CHANNEL_3,     /**< DMA channel id for channel 3 */
	QM_DMA_CHANNEL_4,     /**< DMA channel id for channel 4 */
	QM_DMA_CHANNEL_5,     /**< DMA channel id for channel 5 */
	QM_DMA_CHANNEL_6,     /**< DMA channel id for channel 6 */
	QM_DMA_CHANNEL_7,     /**< DMA channel id for channel 7 */
	QM_DMA_CHANNEL_NUM    /**< Number of DMA channels (8). */
} qm_dma_channel_id_t;
/**
 * DMA hardware handshake interfaces.
 *
 * Peripheral request lines wired to the DMA controller. These values
 * fit the 4-bit SRC_PER/DEST_PER fields of the channel CFG register
 * (see QM_DMA_CFG_H_SRC_PER_MASK / QM_DMA_CFG_H_DEST_PER_MASK) --
 * presumably they are programmed there; confirm against the datasheet.
 */
typedef enum {
	DMA_HW_IF_UART_A_TX = 0x0,       /**< UART_A_TX */
	DMA_HW_IF_UART_A_RX = 0x1,       /**< UART_A_RX */
	DMA_HW_IF_UART_B_TX = 0x2,       /**< UART_B_TX*/
	DMA_HW_IF_UART_B_RX = 0x3,       /**< UART_B_RX */
	DMA_HW_IF_SPI_MASTER_0_TX = 0x4, /**< SPI_Master_0_TX */
	DMA_HW_IF_SPI_MASTER_0_RX = 0x5, /**< SPI_Master_0_RX */
	DMA_HW_IF_SPI_MASTER_1_TX = 0x6, /**< SPI_Master_1_TX */
	DMA_HW_IF_SPI_MASTER_1_RX = 0x7, /**< SPI_Master_1_RX */
	DMA_HW_IF_SPI_SLAVE_TX = 0x8,    /**< SPI_Slave_TX */
	DMA_HW_IF_SPI_SLAVE_RX = 0x9,    /**< SPI_Slave_RX */
	DMA_HW_IF_I2S_PLAYBACK = 0xa,    /**< I2S_Playback channel */
	DMA_HW_IF_I2S_CAPTURE = 0xb,     /**< I2S_Capture channel */
	DMA_HW_IF_I2C_MASTER_0_TX = 0xc, /**< I2C_Master_0_TX */
	DMA_HW_IF_I2C_MASTER_0_RX = 0xd, /**< I2C_Master_0_RX */
	DMA_HW_IF_I2C_MASTER_1_TX = 0xe, /**< I2C_Master_1_TX */
	DMA_HW_IF_I2C_MASTER_1_RX = 0xf, /**< I2C_Master_1_RX */
} qm_dma_handshake_interface_t;
/**
 * DMA channel register block type.
 *
 * Each hardware channel register (SAR, DAR, LLP, CTL, SSTAT, DSTAT,
 * SSTATAR, DSTATAR, CFG, SGR, DSR) is exposed as two consecutive
 * 32-bit words: the low and high halves of the 64-bit register.
 */
typedef struct {
	QM_RW uint32_t sar_low;            /**< SAR, low word. */
	QM_RW uint32_t sar_high;           /**< SAR, high word. */
	QM_RW uint32_t dar_low;            /**< DAR, low word. */
	QM_RW uint32_t dar_high;           /**< DAR, high word. */
	QM_RW uint32_t llp_low;            /**< LLP, low word. */
	QM_RW uint32_t llp_high;           /**< LLP, high word. */
	QM_RW uint32_t ctrl_low;           /**< CTL, low word. */
	QM_RW uint32_t ctrl_high;          /**< CTL, high word. */
	QM_RW uint32_t src_stat_low;       /**< SSTAT, low word. */
	QM_RW uint32_t src_stat_high;      /**< SSTAT, high word. */
	QM_RW uint32_t dst_stat_low;       /**< DSTAT, low word. */
	QM_RW uint32_t dst_stat_high;      /**< DSTAT, high word. */
	QM_RW uint32_t src_stat_addr_low;  /**< SSTATAR, low word. */
	QM_RW uint32_t src_stat_addr_high; /**< SSTATAR, high word. */
	QM_RW uint32_t dst_stat_addr_low;  /**< DSTATAR, low word. */
	QM_RW uint32_t dst_stat_addr_high; /**< DSTATAR, high word. */
	QM_RW uint32_t cfg_low;            /**< CFG, low word. */
	QM_RW uint32_t cfg_high;           /**< CFG, high word. */
	QM_RW uint32_t src_sg_low;         /**< SGR, low word. */
	QM_RW uint32_t src_sg_high;        /**< SGR, high word. */
	QM_RW uint32_t dst_sg_low;         /**< DSR, low word. */
	QM_RW uint32_t dst_sg_high;        /**< DSR, high word. */
} qm_dma_chan_reg_t;
/** DMA channel control register offsets and masks */
#define QM_DMA_CTL_L_INT_EN_MASK BIT(0)
#define QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET (1)
#define QM_DMA_CTL_L_DST_TR_WIDTH_MASK (0x7 << QM_DMA_CTL_L_DST_TR_WIDTH_OFFSET)
#define QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET (4)
#define QM_DMA_CTL_L_SRC_TR_WIDTH_MASK (0x7 << QM_DMA_CTL_L_SRC_TR_WIDTH_OFFSET)
#define QM_DMA_CTL_L_DINC_OFFSET (7)
#define QM_DMA_CTL_L_DINC_MASK (0x3 << QM_DMA_CTL_L_DINC_OFFSET)
#define QM_DMA_CTL_L_SINC_OFFSET (9)
#define QM_DMA_CTL_L_SINC_MASK (0x3 << QM_DMA_CTL_L_SINC_OFFSET)
#define QM_DMA_CTL_L_DEST_MSIZE_OFFSET (11)
#define QM_DMA_CTL_L_DEST_MSIZE_MASK (0x7 << QM_DMA_CTL_L_DEST_MSIZE_OFFSET)
#define QM_DMA_CTL_L_SRC_MSIZE_OFFSET (14)
#define QM_DMA_CTL_L_SRC_MSIZE_MASK (0x7 << QM_DMA_CTL_L_SRC_MSIZE_OFFSET)
#define QM_DMA_CTL_L_TT_FC_OFFSET (20)
#define QM_DMA_CTL_L_TT_FC_MASK (0x7 << QM_DMA_CTL_L_TT_FC_OFFSET)
#define QM_DMA_CTL_L_LLP_DST_EN_MASK BIT(27)
#define QM_DMA_CTL_L_LLP_SRC_EN_MASK BIT(28)
#define QM_DMA_CTL_H_BLOCK_TS_OFFSET (0)
#define QM_DMA_CTL_H_BLOCK_TS_MASK (0xfff << QM_DMA_CTL_H_BLOCK_TS_OFFSET)
#define QM_DMA_CTL_H_BLOCK_TS_MAX 4095
#define QM_DMA_CTL_H_BLOCK_TS_MIN 1
/** DMA channel config register offsets and masks */
#define QM_DMA_CFG_L_CH_SUSP_MASK BIT(8)
#define QM_DMA_CFG_L_FIFO_EMPTY_MASK BIT(9)
#define QM_DMA_CFG_L_HS_SEL_DST_OFFSET 10
#define QM_DMA_CFG_L_HS_SEL_DST_MASK BIT(QM_DMA_CFG_L_HS_SEL_DST_OFFSET)
#define QM_DMA_CFG_L_HS_SEL_SRC_OFFSET 11
#define QM_DMA_CFG_L_HS_SEL_SRC_MASK BIT(QM_DMA_CFG_L_HS_SEL_SRC_OFFSET)
#define QM_DMA_CFG_L_DST_HS_POL_OFFSET 18
#define QM_DMA_CFG_L_DST_HS_POL_MASK BIT(QM_DMA_CFG_L_DST_HS_POL_OFFSET)
#define QM_DMA_CFG_L_SRC_HS_POL_OFFSET 19
#define QM_DMA_CFG_L_SRC_HS_POL_MASK BIT(QM_DMA_CFG_L_SRC_HS_POL_OFFSET)
#define QM_DMA_CFG_L_RELOAD_SRC_MASK BIT(30)
#define QM_DMA_CFG_L_RELOAD_DST_MASK BIT(31)
#define QM_DMA_CFG_H_SRC_PER_OFFSET (7)
#define QM_DMA_CFG_H_SRC_PER_MASK (0xf << QM_DMA_CFG_H_SRC_PER_OFFSET)
#define QM_DMA_CFG_H_DEST_PER_OFFSET (11)
#define QM_DMA_CFG_H_DEST_PER_MASK (0xf << QM_DMA_CFG_H_DEST_PER_OFFSET)
/**
 * DMA interrupt register block type.
 *
 * Raw, status (post-mask), mask, clear and combined-status registers
 * for the five interrupt classes (tfr, block, src_trans, dst_trans,
 * err). Each hardware register is exposed as two 32-bit words
 * (low/high halves).
 */
typedef struct {
	QM_RW uint32_t raw_tfr_low;           /**< RawTfr */
	QM_RW uint32_t raw_tfr_high;          /**< RawTfr */
	QM_RW uint32_t raw_block_low;         /**< RawBlock */
	QM_RW uint32_t raw_block_high;        /**< RawBlock */
	QM_RW uint32_t raw_src_trans_low;     /**< RawSrcTran */
	QM_RW uint32_t raw_src_trans_high;    /**< RawSrcTran */
	QM_RW uint32_t raw_dst_trans_low;     /**< RawDstTran */
	QM_RW uint32_t raw_dst_trans_high;    /**< RawDstTran */
	QM_RW uint32_t raw_err_low;           /**< RawErr */
	QM_RW uint32_t raw_err_high;          /**< RawErr */
	QM_RW uint32_t status_tfr_low;        /**< StatusTfr */
	QM_RW uint32_t status_tfr_high;       /**< StatusTfr */
	QM_RW uint32_t status_block_low;      /**< StatusBlock */
	QM_RW uint32_t status_block_high;     /**< StatusBlock */
	QM_RW uint32_t status_src_trans_low;  /**< StatusSrcTran */
	QM_RW uint32_t status_src_trans_high; /**< StatusSrcTran */
	QM_RW uint32_t status_dst_trans_low;  /**< StatusDstTran */
	QM_RW uint32_t status_dst_trans_high; /**< StatusDstTran */
	QM_RW uint32_t status_err_low;        /**< StatusErr */
	QM_RW uint32_t status_err_high;       /**< StatusErr */
	QM_RW uint32_t mask_tfr_low;          /**< MaskTfr */
	QM_RW uint32_t mask_tfr_high;         /**< MaskTfr */
	QM_RW uint32_t mask_block_low;        /**< MaskBlock */
	QM_RW uint32_t mask_block_high;       /**< MaskBlock */
	QM_RW uint32_t mask_src_trans_low;    /**< MaskSrcTran */
	QM_RW uint32_t mask_src_trans_high;   /**< MaskSrcTran */
	QM_RW uint32_t mask_dst_trans_low;    /**< MaskDstTran */
	QM_RW uint32_t mask_dst_trans_high;   /**< MaskDstTran */
	QM_RW uint32_t mask_err_low;          /**< MaskErr */
	QM_RW uint32_t mask_err_high;         /**< MaskErr */
	QM_RW uint32_t clear_tfr_low;         /**< ClearTfr */
	QM_RW uint32_t clear_tfr_high;        /**< ClearTfr */
	QM_RW uint32_t clear_block_low;       /**< ClearBlock */
	QM_RW uint32_t clear_block_high;      /**< ClearBlock */
	QM_RW uint32_t clear_src_trans_low;   /**< ClearSrcTran */
	QM_RW uint32_t clear_src_trans_high;  /**< ClearSrcTran */
	QM_RW uint32_t clear_dst_trans_low;   /**< ClearDstTran */
	QM_RW uint32_t clear_dst_trans_high;  /**< ClearDstTran */
	QM_RW uint32_t clear_err_low;         /**< ClearErr */
	QM_RW uint32_t clear_err_high;        /**< ClearErr */
	QM_RW uint32_t status_int_low;        /**< StatusInt */
	QM_RW uint32_t status_int_high;       /**< StatusInt */
} qm_dma_int_reg_t;
/** DMA interrupt status register bits */
#define QM_DMA_INT_STATUS_TFR BIT(0)
#define QM_DMA_INT_STATUS_ERR BIT(4)
/**
 * DMA miscellaneous register block type.
 *
 * Controller-wide configuration, channel-enable, ID and test
 * registers. Each hardware register is exposed as two 32-bit words
 * (low/high halves).
 */
typedef struct {
	QM_RW uint32_t cfg_low;      /**< DmaCfgReg, low word. */
	QM_RW uint32_t cfg_high;     /**< DmaCfgReg, high word. */
	QM_RW uint32_t chan_en_low;  /**< ChEnReg, low word. */
	QM_RW uint32_t chan_en_high; /**< ChEnReg, high word. */
	QM_RW uint32_t id_low;       /**< DmaIdReg, low word. */
	QM_RW uint32_t id_high;      /**< DmaIdReg, high word. */
	QM_RW uint32_t test_low;     /**< DmaTestReg, low word. */
	QM_RW uint32_t test_high;    /**< DmaTestReg, high word. */
	QM_RW uint32_t reserved[4];  /**< Reserved */
} qm_dma_misc_reg_t;
/** Channel write enable in the misc channel enable register */
#define QM_DMA_MISC_CHAN_EN_WE_OFFSET (8)
/** Controller enable bit in the misc config register */
#define QM_DMA_MISC_CFG_DMA_EN BIT(0)
/**
 * DMA controller register map: eight channel register blocks
 * (matching QM_DMA_CHANNEL_NUM), the interrupt block, a reserved
 * area (software-handshake registers, unused here), then the
 * miscellaneous block.
 */
typedef struct {
	QM_RW qm_dma_chan_reg_t chan_reg[8]; /**< Channel Register */
	QM_RW qm_dma_int_reg_t int_reg;      /**< Interrupt Register */
	QM_RW uint32_t reserved[12];         /**< Reserved (SW HS) */
	QM_RW qm_dma_misc_reg_t misc_reg;    /**< Miscellaneous Register */
} qm_dma_reg_t;
#if (UNIT_TEST)
qm_dma_reg_t test_dma_instance[QM_DMA_NUM];
qm_dma_reg_t *test_dma[QM_DMA_NUM];
#define QM_DMA test_dma
#else
#define QM_DMA_BASE (0xB0700000)
extern qm_dma_reg_t *qm_dma[QM_DMA_NUM];
#define QM_DMA qm_dma
#endif
/** IRQs and interrupt vectors.
*
* Any IRQ > 1 actually has a SCSS mask register offset of +1.
@ -1056,6 +1345,9 @@ typedef enum {
* parentheses because they are expanded as token concatenation.
*/
#define QM_INT_VECTOR_DOUBLE_FAULT 8
#define QM_INT_VECTOR_PIC_TIMER 32
#define QM_IRQ_GPIO_0 (8)
#define QM_IRQ_GPIO_0_MASK_OFFSET (9)
#define QM_IRQ_GPIO_0_VECTOR 44
@ -1088,6 +1380,10 @@ typedef enum {
#define QM_IRQ_I2C_1_MASK_OFFSET (1)
#define QM_IRQ_I2C_1_VECTOR 37
#define QM_IRQ_MBOX (21)
#define QM_IRQ_MBOX_MASK_OFFSET (22)
#define QM_IRQ_MBOX_VECTOR 57
#define QM_IRQ_AC (22)
#define QM_IRQ_AC_MASK_OFFSET (26)
#define QM_IRQ_AC_VECTOR 58
@ -1120,7 +1416,49 @@ typedef enum {
#define QM_IRQ_UART_1_MASK_OFFSET (7)
#define QM_IRQ_UART_1_VECTOR 42
#define QM_INT_VECTOR_PIC_TIMER 32
#define QM_IRQ_DMA_0 (13)
#define QM_IRQ_DMA_0_MASK_OFFSET (14)
#define QM_IRQ_DMA_0_VECTOR 49
#define QM_IRQ_DMA_1 (14)
#define QM_IRQ_DMA_1_MASK_OFFSET (15)
#define QM_IRQ_DMA_1_VECTOR 50
#define QM_IRQ_DMA_2 (15)
#define QM_IRQ_DMA_2_MASK_OFFSET (16)
#define QM_IRQ_DMA_2_VECTOR 51
#define QM_IRQ_DMA_3 (16)
#define QM_IRQ_DMA_3_MASK_OFFSET (17)
#define QM_IRQ_DMA_3_VECTOR 52
#define QM_IRQ_DMA_4 (17)
#define QM_IRQ_DMA_4_MASK_OFFSET (18)
#define QM_IRQ_DMA_4_VECTOR 53
#define QM_IRQ_DMA_5 (18)
#define QM_IRQ_DMA_5_MASK_OFFSET (19)
#define QM_IRQ_DMA_5_VECTOR 54
#define QM_IRQ_DMA_6 (19)
#define QM_IRQ_DMA_6_MASK_OFFSET (20)
#define QM_IRQ_DMA_6_VECTOR 55
#define QM_IRQ_DMA_7 (20)
#define QM_IRQ_DMA_7_MASK_OFFSET (21)
#define QM_IRQ_DMA_7_VECTOR 56
#define QM_IRQ_DMA_ERR (24)
#define QM_IRQ_DMA_ERR_MASK_OFFSET (28)
#define QM_IRQ_DMA_ERR_VECTOR 60
#define QM_SS_IRQ_ADC_PWR (29)
#define QM_SS_IRQ_ADC_PWR_MASK_OFFSET (33)
#define QM_SS_IRQ_ADC_PWR_VECTOR (65)
#define QM_SS_IRQ_ADC_CAL (30)
#define QM_SS_IRQ_ADC_CAL_MASK_OFFSET (34)
#define QM_SS_IRQ_ADC_CAL_VECTOR (66)
/* Hybrid oscillator output select (0=Silicon, 1=Crystal) */
#define QM_OSC0_MODE_SEL BIT(3)
@ -1133,6 +1471,12 @@ typedef enum {
#define OSC0_CFG0_OSC0_XTAL_COUNT_VALUE_MASK (0x00600000)
#define OSC0_CFG0_OSC0_XTAL_COUNT_VALUE_OFFS (21)
/* Silicon Oscillator parameters */
#define OSC0_CFG1_FTRIMOTP_MASK (0x3FF00000)
#define OSC0_CFG1_FTRIMOTP_OFFS (20)
#define OSC0_CFG1_SI_FREQ_SEL_MASK (0x00000300)
#define OSC0_CFG1_SI_FREQ_SEL_OFFS (8)
/* USB PLL enable bit*/
#define QM_USB_PLL_PDLD BIT(0)
/* USB PLL has locked when this bit is 1*/
@ -1159,4 +1503,14 @@ typedef enum {
#define QM_PMUX_SLEW2 (REG_VAL(0xB0800918))
#define QM_PMUX_SLEW3 (REG_VAL(0xB080091C))
/**
* Version variables.
*/
#if (UNIT_TEST)
uint32_t test_rom_version;
#define ROM_VERSION_ADDRESS &test_rom_version;
#else
#define ROM_VERSION_ADDRESS (0xFFFFFFEC);
#endif
#endif /* __REGISTERS_H__ */

View file

@ -0,0 +1,104 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __VREG_H__
#define __VREG_H__
#include "qm_common.h"
#include "qm_soc_regs.h"
/** Voltage regulator operating modes. */
typedef enum {
	VREG_MODE_SWITCHING = 0, /**< Switching regulator mode. */
	VREG_MODE_LINEAR,        /**< Linear regulator mode. */
	VREG_MODE_SHUTDOWN,      /**< Regulator shut down. */
	VREG_MODE_NUM,           /**< Number of modes. */
} vreg_mode_t;
/**
* Voltage Regulators Control.
*
* @defgroup groupVREG Quark SE Voltage Regulators
* @{
*/
/**
* Set AON Voltage Regulator mode.
*
* The AON Voltage Regulator is not a
* switching regulator and only acts as
* a linear regulator.
* VREG_MODE_SWITCHING is not a valid mode
* for the AON Voltage Regulator.
*
* @param[in] mode Voltage Regulator mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int vreg_aon_set_mode(const vreg_mode_t mode);
/**
* Set Platform 3P3 Voltage Regulator mode.
*
* @param[in] mode Voltage Regulator mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int vreg_plat3p3_set_mode(const vreg_mode_t mode);
/**
* Set Platform 1P8 Voltage Regulator mode.
*
* @param[in] mode Voltage Regulator mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int vreg_plat1p8_set_mode(const vreg_mode_t mode);
/**
* Set Host Voltage Regulator mode.
*
* @param[in] mode Voltage Regulator mode.
*
* @return Standard errno return type for QMSI.
* @retval 0 on success.
* @retval Negative @ref errno for possible error codes.
*/
int vreg_host_set_mode(const vreg_mode_t mode);
/**
* @}
*/
#endif /* __VREG_H__ */

View file

@ -22,11 +22,9 @@
#include <nanokernel.h>
#include <rtc.h>
#include "qm_isr.h"
#include "qm_rtc.h"
static struct device *rtc_qmsi_dev;
static void (*user_callback)(struct device *dev);
static void rtc_qmsi_enable(struct device *dev)
{
@ -38,11 +36,6 @@ static void rtc_qmsi_disable(struct device *dev)
clk_periph_disable(CLK_PERIPH_RTC_REGISTER);
}
static void rtc_callback(void)
{
if (user_callback)
user_callback(rtc_qmsi_dev);
}
static int rtc_qmsi_set_config(struct device *dev, struct rtc_config *cfg)
{
@ -51,11 +44,14 @@ static int rtc_qmsi_set_config(struct device *dev, struct rtc_config *cfg)
qm_cfg.init_val = cfg->init_val;
qm_cfg.alarm_en = cfg->alarm_enable;
qm_cfg.alarm_val = cfg->alarm_val;
qm_cfg.callback = rtc_callback;
/* Casting callback type due different input parameter from QMSI
* compared aganst the Zephyr callback from void cb(struct device *dev)
* to void cb(void *)
*/
qm_cfg.callback = (void *) cfg->cb_fn;
qm_cfg.callback_data = dev;
user_callback = cfg->cb_fn;
if (qm_rtc_set_config(QM_RTC_0, &qm_cfg) != QM_RC_OK)
if (qm_rtc_set_config(QM_RTC_0, &qm_cfg))
return -EIO;
return 0;
@ -63,7 +59,7 @@ static int rtc_qmsi_set_config(struct device *dev, struct rtc_config *cfg)
static int rtc_qmsi_set_alarm(struct device *dev, const uint32_t alarm_val)
{
return qm_rtc_set_alarm(QM_RTC_0, alarm_val) == QM_RC_OK ? 0 : -EIO;
return qm_rtc_set_alarm(QM_RTC_0, alarm_val);
}
static uint32_t rtc_qmsi_read(struct device *dev)
@ -96,5 +92,3 @@ static int rtc_qmsi_init(struct device *dev)
DEVICE_AND_API_INIT(rtc, CONFIG_RTC_0_NAME, &rtc_qmsi_init, NULL, NULL,
SECONDARY, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
(void *)&api);
static struct device *rtc_qmsi_dev = DEVICE_GET(rtc);

View file

@ -13,7 +13,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <device.h>
@ -21,13 +20,11 @@
#include <uart.h>
#include "qm_uart.h"
#include "qm_scss.h"
#include "qm_isr.h"
#include "clk.h"
#include "qm_soc_regs.h"
#define IIR_IID_NO_INTERRUPT_PENDING 0x01
#define IIR_IID_RECV_DATA_AVAIL 0x04
#define IIR_IID_CHAR_TIMEOUT 0x0C
#define IER_ELSI 0x04
#define DIVISOR_LOW(baudrate) \
((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / (16 * baudrate)) & 0xFF)
@ -113,7 +110,9 @@ DEVICE_INIT(uart_1, CONFIG_UART_QMSI_1_NAME, uart_qmsi_init, &drv_data_1,
static int uart_qmsi_poll_in(struct device *dev, unsigned char *data)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
qm_uart_status_t status = qm_uart_get_status(instance);
qm_uart_status_t status;
qm_uart_get_status(instance, &status);
/* In order to check if there is any data to read from UART
* controller we should check if the QM_UART_RX_BUSY bit from
@ -123,7 +122,7 @@ static int uart_qmsi_poll_in(struct device *dev, unsigned char *data)
if (!(status & QM_UART_RX_BUSY))
return -1;
qm_uart_read(instance, data);
qm_uart_read(instance, data, NULL);
return 0;
}
@ -139,18 +138,20 @@ static unsigned char uart_qmsi_poll_out(struct device *dev,
static int uart_qmsi_err_check(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
qm_uart_status_t status;
/* QMSI and Zephyr use the same bits to represent UART errors
* so we don't need to translate each error bit from QMSI API
* to Zephyr API.
*/
return qm_uart_get_status(instance) & QM_UART_LSR_ERROR_BITS;
qm_uart_get_status(instance, &status);
return (status & QM_UART_LSR_ERROR_BITS);
}
#if CONFIG_UART_INTERRUPT_DRIVEN
static bool is_tx_fifo_full(qm_uart_t instance)
{
return !!(QM_UART[instance].lsr & QM_UART_LSR_THRE);
return !!(QM_UART[instance]->lsr & QM_UART_LSR_THRE);
}
static int uart_qmsi_fifo_fill(struct device *dev, const uint8_t *tx_data,
@ -160,7 +161,7 @@ static int uart_qmsi_fifo_fill(struct device *dev, const uint8_t *tx_data,
int i;
for (i = 0; i < size && !is_tx_fifo_full(instance); i++) {
QM_UART[instance].rbr_thr_dll = tx_data[i];
QM_UART[instance]->rbr_thr_dll = tx_data[i];
}
return i;
@ -168,7 +169,7 @@ static int uart_qmsi_fifo_fill(struct device *dev, const uint8_t *tx_data,
static bool is_data_ready(qm_uart_t instance)
{
return QM_UART[instance].lsr & QM_UART_LSR_DR;
return QM_UART[instance]->lsr & QM_UART_LSR_DR;
}
static int uart_qmsi_fifo_read(struct device *dev, uint8_t *rx_data,
@ -178,7 +179,7 @@ static int uart_qmsi_fifo_read(struct device *dev, uint8_t *rx_data,
int i;
for (i = 0; i < size && is_data_ready(instance); i++) {
rx_data[i] = QM_UART[instance].rbr_thr_dll;
rx_data[i] = QM_UART[instance]->rbr_thr_dll;
}
return i;
@ -188,14 +189,14 @@ static void uart_qmsi_irq_tx_enable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh |= QM_UART_IER_ETBEI;
QM_UART[instance]->ier_dlh |= QM_UART_IER_ETBEI;
}
static void uart_qmsi_irq_tx_disable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh &= ~QM_UART_IER_ETBEI;
QM_UART[instance]->ier_dlh &= ~QM_UART_IER_ETBEI;
}
static int uart_qmsi_irq_tx_ready(struct device *dev)
@ -211,21 +212,21 @@ static int uart_qmsi_irq_tx_empty(struct device *dev)
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
const uint32_t mask = (QM_UART_LSR_TEMT | QM_UART_LSR_THRE);
return (QM_UART[instance].lsr & mask) == mask;
return (QM_UART[instance]->lsr & mask) == mask;
}
static void uart_qmsi_irq_rx_enable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh |= QM_UART_IER_ERBFI;
QM_UART[instance]->ier_dlh |= QM_UART_IER_ERBFI;
}
static void uart_qmsi_irq_rx_disable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh &= ~QM_UART_IER_ERBFI;
QM_UART[instance]->ier_dlh &= ~QM_UART_IER_ERBFI;
}
static int uart_qmsi_irq_rx_ready(struct device *dev)
@ -233,21 +234,22 @@ static int uart_qmsi_irq_rx_ready(struct device *dev)
struct uart_qmsi_drv_data *drv_data = dev->driver_data;
uint32_t id = (drv_data->iir_cache & QM_UART_IIR_IID_MASK);
return (id == IIR_IID_RECV_DATA_AVAIL) || (id == IIR_IID_CHAR_TIMEOUT);
return (id == QM_UART_IIR_RECV_DATA_AVAIL) ||
(id == QM_UART_IIR_CHAR_TIMEOUT);
}
static void uart_qmsi_irq_err_enable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh |= IER_ELSI;
QM_UART[instance]->ier_dlh |= QM_UART_IER_ELSI;
}
static void uart_qmsi_irq_err_disable(struct device *dev)
{
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
QM_UART[instance].ier_dlh &= ~IER_ELSI;
QM_UART[instance]->ier_dlh &= ~QM_UART_IER_ELSI;
}
static int uart_qmsi_irq_is_pending(struct device *dev)
@ -263,7 +265,7 @@ static int uart_qmsi_irq_update(struct device *dev)
qm_uart_t instance = GET_CONTROLLER_INSTANCE(dev);
struct uart_qmsi_drv_data *drv_data = dev->driver_data;
drv_data->iir_cache = QM_UART[instance].iir_fcr;
drv_data->iir_cache = QM_UART[instance]->iir_fcr;
return 1;
}

View file

@ -23,13 +23,13 @@
#include <spi.h>
#include <gpio.h>
#include "qm_scss.h"
#include "qm_spi.h"
#include "clk.h"
#include "qm_isr.h"
struct pending_transfer {
struct device *dev;
qm_spi_async_transfer_t xfer;
int counter;
};
static struct pending_transfer pending_transfers[QM_SPI_NUM];
@ -44,7 +44,7 @@ struct spi_qmsi_runtime {
struct device *gpio_cs;
device_sync_call_t sync;
qm_spi_config_t cfg;
qm_rc_t rc;
int rc;
bool loopback;
};
@ -92,50 +92,28 @@ static int spi_qmsi_configure(struct device *dev,
return 0;
}
static void pending_transfer_complete(uint32_t id, qm_rc_t rc)
static void transfer_complete(void *data, int error, qm_spi_status_t status,
uint16_t len)
{
struct pending_transfer *pending = &pending_transfers[id];
struct spi_qmsi_config *spi_config =
((struct device *)data)->config->config_info;
qm_spi_t spi = spi_config->spi;
struct pending_transfer *pending = &pending_transfers[spi];
struct device *dev = pending->dev;
struct spi_qmsi_runtime *context;
qm_spi_config_t *cfg;
if (!dev)
return;
context = dev->driver_data;
cfg = &context->cfg;
pending->counter++;
/*
* When it is TX/RX transfer this function will be called twice.
*/
if (cfg->transfer_mode == QM_SPI_TMOD_TX_RX && pending->counter == 1)
return;
spi_control_cs(dev, false);
pending->dev = NULL;
pending->counter = 0;
context->rc = rc;
context->rc = error;
device_sync_call_complete(&context->sync);
}
static void spi_qmsi_tx_callback(uint32_t id, uint32_t len)
{
pending_transfer_complete(id, QM_RC_OK);
}
static void spi_qmsi_rx_callback(uint32_t id, uint32_t len)
{
pending_transfer_complete(id, QM_RC_OK);
}
static void spi_qmsi_err_callback(uint32_t id, qm_rc_t err)
{
pending_transfer_complete(id, err);
}
static int spi_qmsi_slave_select(struct device *dev, uint32_t slave)
{
struct spi_qmsi_config *spi_config = dev->config->config_info;
@ -158,8 +136,8 @@ static inline uint8_t frame_size_to_dfs(qm_spi_frame_size_t frame_size)
}
static int spi_qmsi_transceive(struct device *dev,
const void *tx_buf, uint32_t tx_buf_len,
void *rx_buf, uint32_t rx_buf_len)
const void *tx_buf, uint32_t tx_buf_len,
void *rx_buf, uint32_t rx_buf_len)
{
struct spi_qmsi_config *spi_config = dev->config->config_info;
qm_spi_t spi = spi_config->spi;
@ -167,7 +145,7 @@ static int spi_qmsi_transceive(struct device *dev,
qm_spi_config_t *cfg = &context->cfg;
uint8_t dfs = frame_size_to_dfs(cfg->frame_size);
qm_spi_async_transfer_t *xfer;
qm_rc_t rc;
int rc;
if (pending_transfers[spi].dev)
return -EBUSY;
@ -177,12 +155,13 @@ static int spi_qmsi_transceive(struct device *dev,
xfer->rx = rx_buf;
xfer->rx_len = rx_buf_len / dfs;
/* This cast is necessary to drop the "const" modifier, since QMSI xfer
* does not take a const pointer.
*/
xfer->tx = (uint8_t *)tx_buf;
xfer->tx_len = tx_buf_len / dfs;
xfer->id = spi;
xfer->tx_callback = spi_qmsi_tx_callback;
xfer->rx_callback = spi_qmsi_rx_callback;
xfer->err_callback = spi_qmsi_err_callback;
xfer->callback_data = dev;
xfer->callback = transfer_complete;
if (tx_buf_len == 0)
cfg->transfer_mode = QM_SPI_TMOD_RX;
@ -199,13 +178,13 @@ static int spi_qmsi_transceive(struct device *dev,
QM_SPI[spi]->ctrlr0 |= BIT(11);
rc = qm_spi_set_config(spi, cfg);
if (rc != QM_RC_OK)
if (rc != 0)
return -EINVAL;
spi_control_cs(dev, true);
rc = qm_spi_irq_transfer(spi, xfer);
if (rc != QM_RC_OK) {
if (rc != 0) {
spi_control_cs(dev, false);
return -EIO;
}

View file

@ -19,19 +19,18 @@
#include <watchdog.h>
#include <ioapic.h>
#include "clk.h"
#include "qm_interrupt.h"
#include "qm_isr.h"
#include "qm_wdt.h"
#include "qm_scss.h"
static void (*user_cb)(struct device *dev);
static void interrupt_callback(void);
/* global variable to track qmsi wdt conf */
static qm_wdt_config_t qm_cfg;
static void get_config(struct device *dev, struct wdt_config *cfg)
{
qm_wdt_config_t qm_cfg;
qm_wdt_get_config(QM_WDT_0, &qm_cfg);
cfg->timeout = qm_cfg.timeout;
cfg->mode = (qm_cfg.mode == QM_WDT_MODE_RESET) ?
WDT_MODE_RESET : WDT_MODE_INTERRUPT_RESET;
@ -40,19 +39,18 @@ static void get_config(struct device *dev, struct wdt_config *cfg)
static int set_config(struct device *dev, struct wdt_config *cfg)
{
qm_wdt_config_t qm_cfg;
user_cb = cfg->interrupt_fn;
qm_cfg.timeout = cfg->timeout;
qm_cfg.mode = (cfg->mode == WDT_MODE_RESET) ?
QM_WDT_MODE_RESET : QM_WDT_MODE_INTERRUPT_RESET;
qm_cfg.callback = interrupt_callback;
qm_cfg.callback = (void *)user_cb;
qm_cfg.callback_data = dev;
if (qm_wdt_set_config(QM_WDT_0, &qm_cfg) != 0) {
return -EIO;
}
qm_wdt_set_config(QM_WDT_0, &qm_cfg);
qm_wdt_start(QM_WDT_0);
return 0;
return qm_wdt_start(QM_WDT_0) == 0 ? 0 : -EIO;
}
static void reload(struct device *dev)
@ -78,15 +76,10 @@ static struct wdt_driver_api api = {
.reload = reload,
};
void wdt_qmsi_isr(void *arg)
{
qm_wdt_isr_0();
}
static int init(struct device *dev)
{
IRQ_CONNECT(QM_IRQ_WDT_0, CONFIG_WDT_0_IRQ_PRI,
wdt_qmsi_isr, 0, IOAPIC_EDGE | IOAPIC_HIGH);
qm_wdt_isr_0, 0, IOAPIC_EDGE | IOAPIC_HIGH);
/* Unmask watchdog interrupt */
irq_enable(QM_IRQ_WDT_0);
@ -100,15 +93,3 @@ static int init(struct device *dev)
DEVICE_AND_API_INIT(wdt, CONFIG_WDT_0_NAME, init, 0, 0,
PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
(void *)&api);
/* Define 'struct device' variable which is passed to the ISR. Even if it
* is not used by the ISR code, we require it in order to be able to pass
* the device argument from the user callback (wdt_config->interrupt_fn).
*/
struct device *wdt_qmsi_isr_dev = DEVICE_GET(wdt);
static void interrupt_callback(void)
{
if (user_cb)
user_cb(wdt_qmsi_isr_dev);
}