How do I get implied volatility from TWS into R using IBrokers?

Currently I have modified some code I found here to read bid/ask prices for options into R. I then feed those back to TWS using calculateImpliedVolatility to get implied volatilities. It seems I should be able to get them in one step using .twsTickType$MODEL_OPTION, but when I modify the same code I used for the bid/ask prices I can't get it to work. This is what I have tried:
eWrapper.data.Opt_Model <- function(n) {
  eW <- eWrapper(NULL)  # use basic template
  eW$assign.Data("data",
                 rep(list(structure(.xts(matrix(rep(NA_real_, 8), nc = 8), 0),
                                    .Dimnames = list(NULL, c("ImpVol", "Delta", "tv", "pvdiv",
                                                             "gamma", "vega", "theta", "spot")))), n))
  eW$tickPrice <- function(curMsg, msg, timestamp, file, ...)
  {
    tickType <- msg[3]
    msg <- as.numeric(msg)
    id <- msg[2]  # as.numeric(msg[2])
    data <- eW$get.Data("data")  # list position of symbol (by id == msg[2])
    attr(data[[id]], "index") <- as.numeric(Sys.time())
    nr.data <- NROW(data[[id]])
    if (tickType == .twsTickType$MODEL_OPTION) {
      data[[id]][nr.data, 1:8] <- msg[4:11]
    }
    # else
    #   if (tickType == .twsTickType$ASK) {
    #     data[[id]][nr.data, 2] <- msg[4]
    #   }
    eW$assign.Data("data", data)
    c(curMsg, msg)
  }
  return(eW)
}

It took some time, but I got it to work. The key change was to override tickOptionComputation instead of tickPrice, since that is the callback the MODEL_OPTION message arrives through:
eWrapper.data.Opt_Model <- function(n) {
  eW <- eWrapper(NULL)  # use basic template
  eW$assign.Data("data",
                 rep(list(structure(.xts(matrix(rep(NA_real_, 8), nc = 8), 0),
                                    .Dimnames = list(NULL, c("modelOption: impVol: ", " delta: ",
                                                             " modelPrice: ", " pvDiv ", " gamma: ",
                                                             " vega: ", " theta: ", " undPrice: ")))), n))
  eW$tickOptionComputation <- function(curMsg, msg, timestamp, file, ...) {
    tickType <- msg[3]
    msg <- as.numeric(msg)
    id <- msg[2]  # as.numeric(msg[2])
    data <- eW$get.Data("data")  # list position of symbol (by id == msg[2])
    attr(data[[id]], "index") <- as.numeric(Sys.time())
    nr.data <- NROW(data[[id]])
    if (tickType == .twsTickType$MODEL_OPTION) {
      data[[id]][nr.data, 1:8] <- msg[4:11]
    }
    # else
    #   if (tickType == .twsTickType$ASK) {
    #     data[[id]][nr.data, 2] <- msg[4]
    #   }
    eW$assign.Data("data", data)
    c(curMsg, msg)
  }
  return(eW)
}
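For reference, a minimal usage sketch of how this wrapper could be fed to reqMktData. The connection settings and the option contract below are placeholders, not from the original post; substitute your own symbol, expiry, strike and right.
library(IBrokers)

tws <- twsConnect()   # assumes TWS / IB Gateway is listening on the default port

# Hypothetical option contract, for illustration only.
opt <- twsOption(local = "", symbol = "AAPL",
                 expiry = "20240119", strike = "150", right = "C")

# Build the wrapper for a single contract (n = 1) and stream ticks through it.
# reqMktData() keeps processing messages until interrupted; the accumulated
# model-option values live inside the wrapper's environment.
wrap <- eWrapper.data.Opt_Model(1)
reqMktData(tws, opt, eventWrapper = wrap, CALLBACK = twsCALLBACK)

wrap$get.Data("data")   # implied vol, greeks, model price, underlying price
twsDisconnect(tws)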

Related

GO concatenate string with int

I have the following code, which needs to take an int value and append it to a string with a unit suffix. E.g., at the start I have this:
"fds data "
After the if statement it should look like this:
"fds data 10M"
This is the code:
ltrCfg := "fds data "
if len(cfg.ltrSharedDicts) > 0 {
    ltrCfg += strconv.Itoa(cfg.ltrSharedDicts["c_data"])
    ltrCfg += "M"
} else {
    ltrCfg += "10M"
}
out = append(out, ltrCfg)

ltrCert := "fds data "
if len(cfg.ltrSharedDicts) > 0 {
    ltrCert += strconv.Itoa(cfg.ltrSharedDicts["d_data"])
    ltrCert += "M"
} else {
    ltrCert += "20M"
}
out = append(out, ltrCert)
The code works, but I wonder about the first branch of the if statement:
if len(cfg.ltrSharedDicts) > 0 {
    ltrCfg += strconv.Itoa(cfg.ltrSharedDicts["c_data"])
    ltrCfg += "M"
}
Is there a better way to achieve this?
For readability, I would write:
cd, ok := cfg.ltrSharedDicts["c_data"]
if !ok {
    cd = 10
}
out = append(out, fmt.Sprintf("fds data %dM", cd))

Downloading multiple PDFs using Rselenium

I'm trying to download multiple PDFs by navigating through a page. Although I'm able to navigate the page using the drop-downs and download a PDF at the end, I'm getting this error:
An element command failed because the referenced element is no longer attached to the DOM.
Below is my code:
library(RSelenium)
library(stringr)
library(magrittr)  # for %>%

rd <- rsDriver()
remDr <- rd[["client"]]
remDr$navigate("http://secc.gov.in/lgdStateList")

# First drop-down
stateEle <- remDr$findElement("id", "lgdState")
states <- stateEle$getElementText()[[1]] %>% strsplit(., '\\n') %>% unlist %>% str_trim('left')
states <- states[-1]
for (i in 1:length(states)) {
  stateEle$clickElement()
  stateEle$sendKeysToElement(list(states[i]))
  stateEle$clickElement()
  # Second drop-down
  distEle <- remDr$findElement("id", "lgdDistrict")
  districts <- distEle$getElementText()[[1]] %>% strsplit(., '\\n') %>% unlist %>% str_trim('left')
  districts <- districts[-1]
  for (j in 1:length(districts)) {
    distEle$clickElement()
    distEle$sendKeysToElement(list(districts[[j]]))
    distEle$clickElement()
    # Third drop-down
    blockEle <- remDr$findElement("id", "lgdBlock")
    block <- blockEle$getElementText()[[1]] %>% strsplit(., '\\n') %>% unlist %>% str_trim('left')
    block <- block[-1]
    for (k in 1:length(block)) {
      blockEle$clickElement()
      blockEle$sendKeysToElement(list(block[[k]]))
      blockEle$clickElement()
      gpEle <- remDr$findElements('class', 'statesrow')
      for (m in 1:length(gpEle)) {
        h <- unlist(gpEle[[m]]$getElementAttribute('innerHTML'))
        h <- unlist(h %>% strsplit(., '<td>'))
        h <- h[-1]
        for (n in 1:length(h)) {
          xpath1 <- paste('//*[@id="example"]/tbody/tr[', m, ']/td[', n, ']/a')
          pdfEle <- remDr$findElement('xpath', xpath1)
          pdfEle$clickElement()
          Sys.sleep(5)
        }
      }
    }
  }
}
As per your request, here is a way to do it with rvest instead:
library(rvest)
library(stringr)

url <- "http://secc.gov.in/lgdStateList"
page <- html_session(url)

## STATE LOOP ##
state <- html_nodes(page, css = "#lgdState > option") %>% html_text()
state <- state[-1]
state_id <- html_nodes(page, css = "#lgdState > option") %>% html_attr('value')
state_id <- state_id[-1]
for (i in 1:length(state)) {
  page1 <- rvest:::request_POST(page, url = "http://secc.gov.in/lgdDistrictList",
                                body = list("stateCode" = state_id[i]),
                                encode = "form")
  ## DISTRICT LOOP ##
  district <- html_nodes(page1, css = "#lgdDistrict > option") %>% html_text()
  district <- district[-1]
  district_id <- html_nodes(page1, css = "#lgdDistrict > option") %>% html_attr('value')
  district_id <- district_id[-1]
  for (j in 1:length(district)) {
    page2 <- rvest:::request_POST(page1, url = "http://secc.gov.in/lgdBlockList",
                                  body = list("stateCode" = state_id[i],
                                              "districtCode" = district_id[j]),
                                  encode = "form")
    ## BLOCK LOOP ##
    block <- html_nodes(page2, css = "#lgdBlock > option") %>% html_text()
    block <- block[-1]
    block_id <- html_nodes(page2, css = "#lgdBlock > option") %>% html_attr('value')
    block_id <- block_id[-1]
    for (k in 1:length(block)) {
      page3 <- rvest:::request_POST(page2, url = "http://secc.gov.in/lgdGpList",
                                    body = list("stateCode" = state_id[i],
                                                "districtCode" = district_id[j],
                                                "blockCode" = block_id[k]),
                                    encode = "form")
      txt <- html_nodes(page3, css = "#example a") %>% html_attr("onclick")
      gpcode <- sapply(txt, function(x) {
        k <- str_extract_all(x, "\\([^()]+\\)")[[1]]
        k <- substring(k, 2, nchar(k) - 1)
        regexp <- "[[:digit:]]+"
        k <- str_extract(strsplit(k, ",")[[1]][4], regexp)
      })
      ## GP CODE LOOP to download file ##
      for (l in 1:length(gpcode)) {
        page4 <- rvest:::request_POST(page3, url = "http://secc.gov.in/downloadLgdwisePdfFile",
                                      body = list("stateCode" = state_id[i],
                                                  "districtCode" = district_id[j],
                                                  "blockCode" = block_id[k],
                                                  "gpCode" = gpcode[l]),
                                      encode = "form")
        error <- "PDF File for this Gram Panchayat is not available."
        error_displayed <- try(html_nodes(page4, css = ".error") %>% html_text())
        if (error != error_displayed) {
          filename <- gsub("attachment;filename=", "", page4$response$headers$`content-disposition`)
          filename <- str_replace_all(filename, '"', "")
          writeBin(page4$response$content, filename)
        }
      }
    }
  }
}
This is again without RSelenium. :)
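If you would rather keep the RSelenium approach, the "no longer attached to the DOM" error usually means the drop-downs are re-rendered after each selection, so handles found before the change go stale. A rough sketch of the usual workaround, shown for the outer state loop only and assuming the lgdState id stays the same, is to re-locate the element inside the loop and wait for the page to update:
library(RSelenium)
library(stringr)
library(magrittr)  # for %>%

rd <- rsDriver()
remDr <- rd[["client"]]
remDr$navigate("http://secc.gov.in/lgdStateList")

# Read the state names once.
stateEle <- remDr$findElement("id", "lgdState")
states <- stateEle$getElementText()[[1]] %>% strsplit(., '\\n') %>% unlist %>% str_trim('left')
states <- states[-1]

for (i in seq_along(states)) {
  # Re-locate the drop-down on every pass; the previous handle goes stale
  # once the page re-renders after a selection.
  stateEle <- remDr$findElement("id", "lgdState")
  stateEle$clickElement()
  stateEle$sendKeysToElement(list(states[i]))
  stateEle$clickElement()
  Sys.sleep(2)  # give the dependent district/block drop-downs time to repopulate
  # ... repeat the same re-locate pattern for lgdDistrict, lgdBlock and the
  # PDF links before clicking them ...
}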

Sort array by filename in groovy

I'm trying to sort a list of jars by their filenames:
def jars = ['app-5.0.0.jar', 'app-5.1.1.jar', 'app-5.2.0-9.jar', 'app-5.2.0-10.jar', 'app-5.2.0.jar', 'app-5.1.0.jar']
jars = jars.sort().reverse()
println jars
The result is:
[app-5.2.0.jar, app-5.2.0-9.jar, app-5.2.0-10.jar, app-5.1.1.jar, app-5.1.0.jar, app-5.0.0.jar]
However, I'm more interested in the natural (and probably more intuitive) sorting to receive this sorted list:
[app-5.2.0-10.jar, app-5.2.0-9.jar, app-5.2.0.jar, app-5.1.1.jar, app-5.1.0.jar, app-5.0.0.jar]
Is there a way to achieve this?
This is my current algorithm for sorting, but it's too verbose in my opinion. However, it really does what I'm looking for. Each part of the version (major, minor, maintenance, build) is evaluated independently:
jars = jars.sort { a, b ->
    File fileA = new File(a)
    File fileB = new File(b)
    def partsA = fileA.name.findAll(/\d+/)
    def partsB = fileB.name.findAll(/\d+/)
    if (partsA[0] == null) partsA[0] = "0"
    if (partsB[0] == null) partsB[0] = "0"
    if (partsA[0].toInteger() < partsB[0].toInteger()) {
        println "${partsA[0]} < ${partsB[0]}"
        return -1
    } else if (partsA[0].toInteger() > partsB[0].toInteger()) {
        println "${partsA[0]} > ${partsB[0]}"
        return 1
    } else {
        if (partsA[1] == null) partsA[1] = "0"
        if (partsB[1] == null) partsB[1] = "0"
        if (partsA[1].toInteger() < partsB[1].toInteger()) {
            println "${partsA[1]} < ${partsB[1]}"
            return -1
        } else if (partsA[1].toInteger() > partsB[1].toInteger()) {
            println "${partsA[1]} > ${partsB[1]}"
            return 1
        } else {
            if (partsA[2] == null) partsA[2] = "0"
            if (partsB[2] == null) partsB[2] = "0"
            if (partsA[2].toInteger() < partsB[2].toInteger()) {
                println "${partsA[2]} < ${partsB[2]}"
                return -1
            } else if (partsA[2].toInteger() > partsB[2].toInteger()) {
                println "${partsA[2]} > ${partsB[2]}"
                return 1
            } else {
                if (partsA[3] == null) partsA[3] = "0"
                if (partsB[3] == null) partsB[3] = "0"
                if (partsA[3].toInteger() < partsB[3].toInteger()) {
                    println "${partsA[3]} < ${partsB[3]}"
                    return -1
                } else if (partsA[3].toInteger() > partsB[3].toInteger()) {
                    println "${partsA[3]} > ${partsB[3]}"
                    return 1
                } else {
                    println "${partsA[3]} = ${partsB[3]}"
                    return 0
                }
            }
        }
    }
}
Had to try this:
def jars = ['app-5.0.0.jar', 'app-5.1.1.jar', 'app-5.2.0-9.jar', 'app-5.2.0-10.jar', 'app-5.2.0.jar', 'app-5.1.0.jar', 'app-1.0.jar', 'app-0.10.jar']
jars = jars.sort{ -it.findAll( /\d+/ ).join().toInteger() }
println jars
Gets:
[app-5.2.0-10.jar, app-5.2.0-9.jar, app-5.2.0.jar, app-5.1.1.jar, app-5.1.0.jar, app-5.0.0.jar, app-1.0.jar, app-0.10.jar]
Or a more thorough version that handles large patch versions:
def jars = ['app-5.0.0.jar', 'app-5.1.1.jar', 'app-5.2.0-9.jar', 'app-5.2.0-10.jar', 'app-5.2.0.jar', 'app-5.1.0.jar', 'app-5.1.1-172.jar']
jars.sort { a, b ->
    def aList = a.findAll(/\d+/)
    def bList = b.findAll(/\d+/)
    for (int i = 0; i < aList.size(); i++) {
        def aVal = aList[i] ? aList[i].toInteger() : 0
        def bVal = bList[i] ? bList[i].toInteger() : 0
        if (aVal <=> bVal) { // only return if non-zero, i.e. not equal
            return aVal <=> bVal
        }
    }
    // all facets match up to now; if b has additional parts it must be a later version
    bList.size() > aList.size() ? -1 : 0
}
println jars.reverse()
Gets:
[app-5.2.0-10.jar, app-5.2.0-9.jar, app-5.2.0.jar, app-5.1.1-172.jar, app-5.1.1.jar, app-5.1.0.jar, app-5.0.0.jar]
How about something like this:
def jars = ['app-5.0.0.jar', 'app-5.1.1.jar', 'app-5.2.0-9.jar', 'app-5.2.0-10.jar', 'app-5.2.0.jar', 'app-5.1.0.jar', 'app-5.1.1-172.jar']
// it is probably sufficient to just choose a "high enough" number
// (e.g. 10) instead of resolving max digits.
def maxDigits = jars*.findAll(/\d+/).flatten()*.size().max()
// sort the strings consisting of left-padded version numbers
// e.g. sorting string for 'app-5.1.1-172.jar' is ' 5 1 1172'
jars.sort{ it.findAll(/\d+/)*.padLeft(maxDigits).join() }
println 'max digits: ' + maxDigits
println jars.reverse()
Output:
max digits: 3
[app-5.2.0-10.jar, app-5.2.0-9.jar, app-5.2.0.jar, app-5.1.1-172.jar, app-5.1.1.jar, app-5.1.0.jar, app-5.0.0.jar]

Assignment throws error in Cool

Compilation of this program fails with:
compilers#compilers-vm:~/cool/jim$ coolc list.cl
"list.cl", line 7: syntax error at or near ';'
Compilation halted due to lex and parse errors
class List {
    item : String;
    next : List;
    init(i : String, n : List) : List
    {
        item <- i;
        next <- n;
        self
    };
    flattn() : String
    {
        if ( isvoid next )
        then
            item
        else
            item.concat( next.flattn() )
        fi
    };
};
class Main inherits IO {
    main() : Object {
        let hello : String <- "Hello ",
            wold : String <- "world ",
            newLine : String <- "\n",
            unfedined : List,
            l : List <- (new List).init(hello,
                            (new List).init(wold,
                                (new List).init(newLine, unfedined)))
        in
            out_string( l.flattn() )
    };
};
The body of the init method must be a block that returns a value. Here,
item <- i;
next <- n;
self;
must be wrapped in a block:
init(i : String, n : List) : List
{
    {
        item <- i;
        next <- n;
        self;
    }
};

R: tm Textmining package: Doc-Level metadata generation is slow

I have a list of documents to process, and for each record I want to attach some metadata to the document "member" inside the "corpus" data structure that tm, the R package, generates (from reading in text files).
The for-loop below works, but it is very slow; performance seems to degrade roughly as f ~ 1/n_docs.
for (i in seq(from = 1, to = length(corpus), by = 1)) {
  if (opts$options$verbose == TRUE || i %% 50 == 0) {
    print(paste(i, " ", substr(corpus[[i]], 1, 140), sep = " "))
  }
  DublinCore(corpus[[i]], "title") = csv[[i, 10]]
  DublinCore(corpus[[i]], "Publisher") = csv[[i, 16]]  # institutions
}
When I instead put the same assignments inside tm_map() (which works much like lapply()), it runs much faster, but the changes are not made persistent; this may do something to the corpus variable, but I don't know what:
i = 0
corpus = tm_map(corpus, function(x) {
  i <<- i + 1
  if (opts$options$verbose == TRUE) {
    print(paste(i, " ", substr(x, 1, 140), sep = " "))
  }
  meta(x, tag = "Heading") = csv[[i, 10]]
  meta(x, tag = "publisher") = csv[[i, 16]]
})
The corpus variable has empty metadata fields after tm_map returns, when it should be filled. I have a few other things to do with the collection afterwards.
The R documentation for the meta() function says this:
Examples:
data("crude")
meta(crude[[1]])
DublinCore(crude[[1]])
meta(crude[[1]], tag = "Topics")
meta(crude[[1]], tag = "Comment") <- "A short comment."
meta(crude[[1]], tag = "Topics") <- NULL
DublinCore(crude[[1]], tag = "creator") <- "Ano Nymous"
DublinCore(crude[[1]], tag = "Format") <- "XML"
DublinCore(crude[[1]])
meta(crude[[1]])
meta(crude)
meta(crude, type = "corpus")
meta(crude, "labels") <- 21:40
meta(crude)
I tried many of these calls (with the variable corpus instead of crude), but they do not seem to work.
Someone else seems to have had the same problem with a similar data set (a forum post from 2009, with no response).
Here's a bit of benchmarking...
With the for loop:
library(tm)
library(microbenchmark)
data("crude")
corpus <- crude  # the benchmark below assumes the example "crude" corpus

expr.for <- function() {
  for (i in seq(from = 1, to = length(corpus), by = 1)) {
    DublinCore(corpus[[i]], "title") = LETTERS[round(runif(26))]
    DublinCore(corpus[[i]], "Publisher") = LETTERS[round(runif(26))]
  }
}
microbenchmark(expr.for())
# Unit: milliseconds
#         expr      min       lq   median       uq      max
# 1 expr.for() 21.50504 22.40111 23.56246 23.90446 70.12398
With tm_map:
corpus <- crude
expr.map <- function() {
  tm_map(corpus, function(x) {
    meta(x, "title") = LETTERS[round(runif(26))]
    meta(x, "Publisher") = LETTERS[round(runif(26))]
    x
  })
}
microbenchmark(expr.map())
# Unit: milliseconds
#         expr      min       lq   median       uq      max
# 1 expr.map() 5.575842 5.700616 5.796284 5.886589 8.753482
So the tm_map version, as you noticed, seems to be about 4 times faster.
In your question you say that the changes in the tm_map version are not persistent; that is because you don't return x at the end of your anonymous function. The end of it should be:
meta(x, tag = "Heading") = csv[[i,10]]
meta(x, tag = "publisher" ) = csv[[i,16]]
x
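Putting it together, a version of the tm_map call from the question that keeps the metadata would look roughly like this (same csv columns as before):
i <- 0
corpus <- tm_map(corpus, function(x) {
  i <<- i + 1
  if (opts$options$verbose == TRUE) {
    print(paste(i, " ", substr(x, 1, 140), sep = " "))
  }
  meta(x, tag = "Heading")   <- csv[[i, 10]]
  meta(x, tag = "publisher") <- csv[[i, 16]]
  x  # return the modified document so tm_map keeps the metadata
})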
